def get_data(self):
    """
    Build card data for the path between two managed objects.

    ``self.id`` is "<mo1 id>-<mo2 id>"; either part may be empty, and
    either object may be missing. Returns both endpoints and the path
    serialized as JSON (list of {x, y, objects} waypoints).
    """
    mo1, mo2 = self.id.split("-")
    # Empty id parts or deleted objects resolve to None
    mo1 = ManagedObject.get_by_id(int(mo1)) if mo1 else None
    mo2 = ManagedObject.get_by_id(int(mo2)) if mo2 else None
    s_path = [mo1]
    if mo1 and mo2:
        try:
            s_path = get_shortest_path(mo1, mo2)
        except ValueError:
            # No path between the objects -- degrade to the two endpoints
            s_path = [mo1, mo2]
    path = []
    for mo in s_path:
        # Skip objects without map coordinates
        if not mo.x or not mo.y:
            continue
        if not path or mo.x != path[-1]["x"] or mo.y != path[-1]["y"]:
            path += [{
                "x": mo.x,
                "y": mo.y,
                "objects": [{"id": mo.id, "name": mo.name}],
            }]
        else:
            # Same coordinates as the previous waypoint -- merge object into it
            path[-1]["objects"] += [{"id": mo.id, "name": mo.name}]
    return {"mo1": mo1, "mo2": mo2, "path": smart_text(orjson.dumps(path))}
def topology_rca_uplink(self, alarm, alarms, seen=None, ts=None):
    """
    Try to set a root cause for *alarm* using uplink topology.

    When every uplink of the alarm's object is also faulty, the alarm is
    correlated to the first uplink alarm within the configured time
    window. The walk then recurses into all neighboring alarms.

    :param alarm: Alarm being correlated (root is set via ``_trace_root``)
    :param alarms: Mapping of managed object id -> active alarm
    :param seen: Set of already visited alarm ids (recursion guard)
    :param ts: Reference timestamp; only alarms raised at or before it are considered
    """
    def can_correlate(a1, a2):
        # Correlate when no window is configured, or the alarms are
        # within topology_rca_window seconds of each other
        return (not config.correlator.topology_rca_window
                or (a1.timestamp - a2.timestamp).total_seconds()
                <= config.correlator.topology_rca_window)

    ts = ts or alarm.timestamp
    seen = seen or set()
    self.print(">>> topology_rca(%s, %s)" % (alarm.id, "{%s}" % ", ".join(str(x) for x in seen)))
    if hasattr(alarm, "_trace_root"):
        self.print("<<< already correlated")
        return
    if alarm.id in seen:
        self.print("<<< already seen")
        return  # Already correlated
    seen.add(alarm.id)
    o_id = alarm.managed_object.id
    # Get neighbor objects (uplinks first -- they are the correlation candidates)
    neighbors = set()
    uplinks = []
    ou = ObjectData.get_by_id(object=o_id)
    if ou and ou.uplinks:
        uplinks = ou.uplinks
        neighbors.update(uplinks)
    for du in ObjectData.get_neighbors(o_id):
        neighbors.add(du)
    if not neighbors:
        self.print("<<< no neighbors")
        return
    # Get neighboring alarms raised no later than the reference timestamp
    na = {}
    for n in neighbors:
        a = alarms.get(n)
        if a and a.timestamp <= ts:
            na[n] = a
    self.print(" Neighbor alarms: %s" % ", ".join(
        "%s%s (%s)" % ("U:" if x in uplinks else "", na[x], ManagedObject.get_by_id(x).name)
        for x in na))
    self.print(" Uplinks: %s" % ", ".join(ManagedObject.get_by_id(u).name for u in uplinks))
    if uplinks and len([na[o] for o in uplinks if o in na]) == len(uplinks):
        # All uplinks are faulty
        # uplinks are ordered according to path length
        # Correlate with first applicable
        for u in uplinks:
            a = na[u]
            if can_correlate(alarm, a):
                self.print("+++ SET ROOT %s -> %s" % (alarm.id, a.id))
                alarm._trace_root = a.id
                break
    # Correlate neighbors' alarms recursively
    for d in na:
        self.topology_rca_uplink(na[d], alarms, seen, ts)
    self.print("<<< done")
def find_object(cls, mac=None, ipv4_address=None):
    """
    Find managed object by chassis MAC or IPv4 address.

    :param mac: Chassis MAC address
    :param ipv4_address: IPv4 address (tried against router_id, then
        against subinterface addresses)
    :param cls:
    :return: Managed object instance or None
    """
    def has_ip(ip, addresses):
        # True when *ip* exactly matches the address part of an
        # "ip/prefixlen" entry in *addresses*
        x = ip + "/"
        for a in addresses:
            if a.startswith(x):
                return True
        return False

    # Find by mac
    if mac:
        metrics["discoveryid_mac_requests"] += 1
        r = cls.get_by_mac(mac)
        if r:
            return ManagedObject.get_by_id(r["object"])
    if ipv4_address:
        metrics["discoveryid_ip_requests"] += 1
        # Try router_id
        d = DiscoveryID.objects.filter(router_id=ipv4_address).first()
        if d:
            metrics["discoveryid_ip_routerid"] += 1
            return d.object
        # Fallback to interface addresses. The "<ip>/" .. "<ip>/99" range
        # query narrows candidates lexicographically; has_ip() then
        # confirms the exact address match.
        o = set(
            d["managed_object"]
            for d in SubInterface._get_collection().with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).find(
                    {"ipv4_addresses": {"$gt": ipv4_address + "/", "$lt": ipv4_address + "/99"}},
                    {"_id": 0, "managed_object": 1, "ipv4_addresses": 1},
                )
            if has_ip(ipv4_address, d["ipv4_addresses"]))
        # Resolve only when the match is unambiguous
        if len(o) == 1:
            metrics["discoveryid_ip_interface"] += 1
            return ManagedObject.get_by_id(list(o)[0])
        metrics["discoveryid_ip_failed"] += 1
    return None
def extract(self):
    """
    Stream reboot records within (start, stop] into the reboot stream.

    :return: Number of records pushed
    """
    processed = 0
    query = {"ts": {"$gt": self.start, "$lte": self.stop}}
    cursor = Reboot._get_collection().find(query, no_cursor_timeout=True).sort("ts")
    for doc in cursor:
        managed_object = ManagedObject.get_by_id(doc["object"])
        if not managed_object:
            # Reboot references a deleted object -- skip
            continue
        self.reboot_stream.push(
            ts=doc["ts"],
            managed_object=managed_object,
            pool=managed_object.pool,
            ip=managed_object.address,
            profile=managed_object.profile,
            object_profile=managed_object.object_profile,
            vendor=managed_object.vendor,
            platform=managed_object.platform,
            version=managed_object.version,
            administrative_domain=managed_object.administrative_domain,
            segment=managed_object.segment,
            container=managed_object.container,
            x=managed_object.x,
            y=managed_object.y,
        )
        processed += 1
        self.last_ts = doc["ts"]
    self.reboot_stream.finish()
    return processed
def api_dashboard(self, request):
    """
    Render a dashboard for the requested object.

    The ``dashboard`` request parameter selects the template; objects
    with certain capabilities override the template name.
    """
    dash_name = request.GET.get("dashboard")
    try:
        # ddash by cals
        oid = request.GET.get("id")
        mo = ManagedObject.get_by_id(oid)
        # Capability-based template override
        if mo.get_caps().get("Sensor | Controller"):
            dash_name = "sensor_controller"
        if mo.get_caps().get("Network | DVBC"):
            dash_name = "modvbc"
    except Exception:
        # NOTE(review): broad except also hides an unknown object
        # (mo may be None) -- falls back to the requested dashboard name
        pass
    try:
        dt = loader[dash_name]
    except Exception:
        self.logger.error("Exception when loading dashboard: %s", request.GET.get("dashboard"))
        return self.response_not_found("Dashboard not found")
    if not dt:
        return self.response_not_found("Dashboard not found")
    # Pass through var_* query parameters as extra template variables
    extra_vars = {}
    for v in request.GET:
        if v.startswith("var_"):
            extra_vars[v] = request.GET[v]
    extra_template = request.GET.get("extra_template")
    try:
        dashboard = dt(oid, extra_template, extra_vars)
    except BaseDashboard.NotFound:
        return self.response_not_found("Object not found")
    return dashboard.render()
def get_data(self):
    """
    Build map data for the shortest path between two managed objects.

    ``self.id`` is "<mo1 id>-<mo2 id>". Previously an empty id part
    raised ``ValueError`` from ``int("")`` and a deleted object passed
    ``None`` into ``get_shortest_path``; both are now tolerated, the
    same way the sibling path-card ``get_data`` handles them.
    """
    mo1, mo2 = self.id.split("-")
    # Guard against empty id parts and deleted objects
    mo1 = ManagedObject.get_by_id(int(mo1)) if mo1 else None
    mo2 = ManagedObject.get_by_id(int(mo2)) if mo2 else None
    s_path = [mo1]
    if mo1 and mo2:
        try:
            s_path = get_shortest_path(mo1, mo2)
        except ValueError:
            # No path between objects -- show only the endpoints
            s_path = [mo1, mo2]
    path = []
    for mo in s_path:
        # Skip missing objects and objects without map coordinates
        if not mo or not mo.x or not mo.y:
            continue
        if not path or mo.x != path[-1]["x"] or mo.y != path[-1]["y"]:
            path += [{
                "x": mo.x,
                "y": mo.y,
                "objects": [{"id": mo.id, "name": mo.name}],
            }]
        else:
            # Same coordinates as the previous waypoint -- merge into it
            path[-1]["objects"] += [{"id": mo.id, "name": mo.name}]
    return {"mo1": mo1, "mo2": mo2, "path": ujson.dumps(path)}
def fix_model(model):
    """
    Backfill missing ``managed_object_profile`` on *model* documents.

    Documents are grouped by their managed object's profile and updated
    with batched ``UpdateMany`` operations (up to IN_SIZE ids per
    operation, up to BULK_SIZE operations per bulk write).

    Fix: partial batches (fewer than IN_SIZE ids left in ``ins[mop]``
    when the scan ends) were previously never written, silently leaving
    up to IN_SIZE - 1 documents per profile unfixed. They are now
    flushed before the final bulk write.
    """
    coll = model._get_collection()
    ins = defaultdict(list)  # profile id -> pending document ids
    bulk = []
    for doc in coll.find(
        {"managed_object_profile": {"$exists": False}},
        {"_id": 1, "managed_object": 1},
    ):
        mo = ManagedObject.get_by_id(doc["managed_object"])
        if not mo:
            continue
        mop = mo.object_profile.id
        ins[mop] += [doc["_id"]]
        if len(ins[mop]) >= IN_SIZE:
            bulk += [
                UpdateMany(
                    {"_id": {"$in": ins[mop]}},
                    {"$set": {"managed_object_profile": mop}},
                )
            ]
            ins[mop] = []
        if len(bulk) >= BULK_SIZE:
            coll.bulk_write(bulk)
            bulk = []
    # Flush remaining partial batches
    for mop, ids in ins.items():
        if ids:
            bulk += [
                UpdateMany(
                    {"_id": {"$in": ids}},
                    {"$set": {"managed_object_profile": mop}},
                )
            ]
    if bulk:
        coll.bulk_write(bulk)
def api_hits(self, request, id):
    """
    Return managed objects matched by a validation rule, with hit
    counts, ordered by hits descending.
    """
    rule = self.get_object_or_404(ValidationRule, id=id)
    pipeline = [
        {"$match": {"attrs.rule": str(rule.id)}},
        {"$group": {"_id": "$object", "hits": {"$sum": 1}}},
        {"$sort": {"hits": -1}},
    ]
    result = []
    for row in ObjectFact._get_collection().aggregate(pipeline):
        mo = ManagedObject.get_by_id(row["_id"])
        if mo is None:
            continue  # object has been deleted
        result.append({
            "managed_object_id": mo.id,
            "managed_object": mo.name,
            "address": mo.address,
            "platform": mo.platform.name if mo.platform else "",
            "hits": row["hits"],
        })
    return result
def handle_tokenizer(self, object=None, profile=None, config=None, *args, **kwargs):
    """
    Dump config tokens for a managed object or a mocked profile object.

    :param object: Managed object id (tokenize its config, or *config* if given)
    :param profile: Profile name; requires --config (mocks an object)
    :param config: Path to a config file to tokenize
    """
    cfg = None
    if config:
        if not os.path.exists(config):
            self.die("File not found: %s" % config)
        with open(config) as f:
            cfg = f.read()
    if object:
        connect()
        mo = ManagedObject.get_by_id(object)
        if not mo:
            self.die("Managed Object not found")
    elif profile:
        p = loader.get_profile(profile)
        if not p:
            self.die("Invalid profile: %s" % profile)
        if not cfg:
            self.die("Specify config file with --config option")
        # Mock up tokenizer
        connect()
        mo = ManagedObject.mock_object(profile=profile)
    else:
        # Fix: corrected "Eigther" typo in user-facing error message
        self.die("Either object or profile must be set")
    tokenizer = mo.iter_config_tokens(config=cfg)
    for token in tokenizer:
        self.print(token)
def get_data(self, request, interval, from_date=None, to_date=None, **kwargs):
    """
    Build the escalated-alarms report dataset.

    :param interval: Days back from now (used when from_date is empty)
    :param from_date: Period start, "%d.%m.%Y" format
    :param to_date: Period end, "%d.%m.%Y" format (inclusive; defaults to now)
    """
    interval = int(interval)
    # Without an explicit start date, force a one-day interval
    if not from_date:
        interval = 1
    if interval:
        ts = datetime.datetime.now() - datetime.timedelta(days=interval)
        q = {"timestamp": {"$gte": ts}}
    else:
        t0 = datetime.datetime.strptime(from_date, "%d.%m.%Y")
        if not to_date:
            t1 = datetime.datetime.now()
        else:
            # Add one day so to_date is inclusive
            t1 = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
        q = {"timestamp": {"$gte": t0, "$lte": t1}}
    # Only alarms escalated to a trouble ticket
    q["escalation_tt"] = {"$exists": True}
    if not request.user.is_superuser:
        q["adm_path"] = {"$in": UserAccess.get_domains(request.user)}
    data = []
    # Collect from both active and archived alarms
    for ac in (ActiveAlarm, ArchivedAlarm):
        for d in ac._get_collection().find(q):
            mo = ManagedObject.get_by_id(d["managed_object"])
            if not mo:
                continue
            data += [(
                d["timestamp"].strftime("%Y-%m-%d %H:%M:%S"),
                d["escalation_ts"].strftime("%Y-%m-%d %H:%M:%S"),
                mo.name.split("#", 1)[0],
                mo.address,
                mo.platform,
                mo.segment.name,
                d["escalation_tt"],
                sum(ss["summary"] for ss in d["total_objects"]),
                sum(ss["summary"] for ss in d["total_subscribers"]),
            )]
    # Chronological order
    data = sorted(data, key=operator.itemgetter(0))
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Timestamp"),
            _("Escalation Timestamp"),
            _("Managed Object"),
            _("Address"),
            _("Platform"),
            _("Segment"),
            _("TT"),
            _("Objects"),
            _("Subscribers"),
        ],
        data=data,
        enumerate=True,
    )
def handler(self, object_id):
    """
    Return the list of config revisions for a managed object.

    :param object_id: Managed object id
    :return: (status, payload) tuple
    """
    managed_object = ManagedObject.get_by_id(int(object_id))
    if managed_object is None:
        return 404, "Not Found"
    revisions = []
    for rev in managed_object.config.get_revisions():
        revisions.append({
            "revision": str(rev.id),
            "timestamp": rev.ts.isoformat(),
        })
    return 200, revisions
def get_annotations(self, f, t, annotation):
    """
    Return JSON-encoded alarm annotations for the object referenced by
    the annotation query, sorted by time.
    """
    # @todo: Check object is exists
    # @todo: Check access
    managed_object = ManagedObject.get_by_id(int(annotation["query"]))
    # Get alarms
    annotations = list(self.get_alarms(managed_object, f, t, annotation))
    annotations.sort(key=operator.itemgetter("time"))
    return ujson.dumps(annotations)
def get_data(self, request, pool=None, selector=None, report_type=None, **kwargs):
    """
    Build the "failed discovery" report: objects which are unavailable
    or whose CLI/SNMP credentials could not be guessed.
    """
    data = []
    columns, columns_desr = [], []
    # (human readable problem, internal filter expression)
    r_map = [
        (_("Not Available"), "2is1.3isp1.3is1"),
        (_("Failed to guess CLI credentials"), "2is1.3isp0.2isp1"),
        (_("Failed to guess SNMP community"), "2is1.3isp1.3is2.1isp1"),
    ]
    for x, y in r_map:
        columns += [y]
        columns_desr += [x]
    mos = ManagedObject.objects.filter()
    if pool:
        mos = mos.filter(pool=pool)
        data += [SectionRow(name=pool.name)]
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    mos = list(mos.values_list("id", flat=True).order_by("id"))
    mos_s = set(mos)
    report = ReportModelFilter()
    result = report.proccessed(",".join(columns))
    mo_hostname = ReportObjectsHostname1(sync_ids=mos)
    mo_hostname = mo_hostname.get_dictionary()
    d_result = ReportDiscoveryResult(sync_ids=mos)
    d_result = d_result.get_dictionary()
    for col in columns:
        # Objects matching this problem class, restricted to our selection
        for mo_id in result[col.strip()].intersection(mos_s):
            mo = ManagedObject.get_by_id(mo_id)
            problem = self.decode_problem(d_result.get(mo_id))
            if not problem and mo_id not in d_result:
                problem = "Discovery disabled"
            data += [(
                mo.name,
                mo.address,
                mo.administrative_domain.name,
                mo.profile.name,
                mo_hostname.get(mo.id, ""),
                mo.auth_profile if mo.auth_profile else "",
                mo.auth_profile.user if mo.auth_profile else mo.user,
                mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro,
                _("No") if not mo.get_status() else _("Yes"),
                columns_desr[columns.index(col)],
                problem,
            )]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Administrative Domain"),
            _("Profile"),
            _("Hostname"),
            _("Auth Profile"),
            _("Username"),
            _("SNMP Community"),
            _("Avail"),
            _("Error"),
            _("Error Detail"),
        ],
        data=data)
def api_info_link(self, request, id, link_id):
    """
    Return a link information card: member objects, their interfaces
    and the computed link utilisation.

    Fix: removed the nested helper ``q`` which was defined but never
    used anywhere in the function.

    :param id: Network segment id (existence/access check only)
    :param link_id: Link id
    :return: Dict with link details and utilisation
    """
    self.get_object_or_404(NetworkSegment, id=id)
    link = self.get_object_or_404(Link, id=link_id)
    r = {
        "id": str(link.id),
        "name": link.name or None,
        "description": link.description or None,
        "objects": [],
        "method": link.discovery_method,
    }
    # Group link interfaces by their managed object
    o = defaultdict(list)
    for i in link.interfaces:
        o[i.managed_object] += [i]
    for mo in sorted(o, key=lambda x: x.name):
        r["objects"] += [{
            "id": mo.id,
            "name": mo.name,
            "interfaces": [{
                "name": i.name,
                "description": i.description or None,
                "status": i.status,
            } for i in sorted(o[mo], key=lambda x: alnum_key(x.name))],
        }]
    # Get link bandwidth: sum per-interface in/out load per object
    mo_in = defaultdict(float)
    mo_out = defaultdict(float)
    mos = [ManagedObject.get_by_id(mo["id"]) for mo in r["objects"]]
    metric_map, last_ts = get_interface_metrics(list(o))
    for mo in o:
        if mo not in metric_map:
            continue
        for i in o[mo]:
            if i.name not in metric_map[mo]:
                continue
            mo_in[mo] += metric_map[mo][i.name]["Interface | Load | In"]
            mo_out[mo] += metric_map[mo][i.name]["Interface | Load | Out"]
    if len(mos) == 2:
        # Point-to-point link: one utilisation value per direction
        mo1, mo2 = mos
        r["utilisation"] = [
            int(max(mo_in[mo1], mo_out[mo2])),
            int(max(mo_in[mo2], mo_out[mo1])),
        ]
    else:
        # Multipoint link: single maximum over all directions
        mv = list(mo_in.values()) + list(mo_out.values())
        if mv:
            r["utilisation"] = [int(max(mv))]
        else:
            r["utilisation"] = 0
    return r
def get_data(self, **kwargs):
    """
    List discovery jobs which have not succeeded for at least
    STALE_INTERVAL minutes, across all pools.
    """
    old = datetime.datetime.now() - datetime.timedelta(minutes=self.STALE_INTERVAL)
    data = []
    for pool in Pool._get_collection().find({}, {"_id": 0, "name": 1}):
        scheduler = Scheduler("discovery", pool=pool["name"])
        # Jobs which ran more than once and last succeeded before *old*
        for r in scheduler.get_collection().find({
            "runs": {"$gt": 1},
            "jcls": {"$regex": "_discovery$"},
            "st": {"$lte": old},
        }):
            mo = ManagedObject.get_by_id(r["key"])
            if not mo or not mo.is_managed:
                continue
            msg = ""
            # NOTE(review): assumes the "tb" key is always present on
            # job documents -- confirm against the scheduler schema
            if r["tb"]:
                tb = r["tb"]
                if "text" in tb and "code" in tb:
                    if tb["text"].endswith("END OF TRACEBACK"):
                        tb["text"] = "Job crashed"
                    msg = "(%s) %s" % (tb["text"], tb["code"])
            data += [[
                mo.administrative_domain.name,
                mo.name,
                mo.profile.name,
                mo.platform.name,
                mo.version.name,
                mo.address,
                mo.segment.name,
                r["jcls"],
                humanize_distance(r["st"]),
                msg,
            ]]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Admin. Domain"),
            _("Object"),
            _("Profile"),
            _("Platform"),
            _("Version"),
            _("Address"),
            _("Segment"),
            _("Job"),
            _("Last Success"),
            _("Reason"),
        ],
        data=sorted(data),
        enumerate=True,
    )
def get_data(self, request, **kwargs): data = [] # Find object with equal ID find = DiscoveryID._get_collection().aggregate([{ "$group": { "_id": "$macs", "count": { "$sum": 1 } } }, { "$match": { "count": { "$gt": 1 } } }]) for f in find: # DiscoveryID.objects.filter(chassis_mac=f["_id"]) if not f["_id"]: # Empty DiscoveryID continue data_c = [] reason = "Other" for r in DiscoveryID._get_collection().find({"macs": f["_id"][0]}, { "_id": 0, "object": 1 }): # ManagedObject.get_by_id(o) mo = ManagedObject.get_by_id(r["object"]) if len(data_c) > 0: if mo.address == data_c[-1][1]: reason = _("Duplicate MO") elif not mo.is_managed == data_c[-1][3]: reason = _("MO is move") data_c += [(mo.name, mo.address, mo.profile.name, mo.is_managed)] data += [SectionRow(name="%s %s" % (f["_id"][0], reason))] data += data_c return self.from_dataset( title=self.title, columns=[ _("Managed Object"), _("Address"), _("Profile"), _("is managed") ], data=data, )
def iter_neighbors(n_ids: Iterable[int]) -> Iterable[ManagedObject]:
    """Yield managed objects for *n_ids*, serving from and filling the cache."""
    for neighbor_id in n_ids:
        cached = self.mo_cache.get(neighbor_id)
        if cached:
            yield cached
            continue
        resolved = ManagedObject.get_by_id(neighbor_id)
        if resolved:
            self.mo_cache[resolved.id] = resolved
            yield resolved
def get_data(self, request, pool=None, int_profile=None, mop=None, avail_status=None, **kwargs):
    """
    Report physical interfaces of the given profile whose subinterface
    facts have traffic_control_broadcast disabled.
    """
    data = []
    mos = ManagedObject.objects.filter(is_managed=True)
    # % fixme remove.
    if not pool and request.user.is_superuser:
        # NOTE(review): hard-coded fallback pool, already flagged for removal
        pool = Pool.get_by_name("STAGEMO")
    if pool:
        mos = mos.filter(pool=pool)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if mop:
        mos = mos.filter(object_profile=mop)
    mos_ids = mos.values_list("id", flat=True)
    iface = Interface.objects.filter(managed_object__in=mos,
                                     profile=int_profile,
                                     type="physical").values_list("managed_object", "name")
    res = []
    n = 0
    # Interface._get_collection()
    # Process object ids in chunks of 10000
    while mos_ids[(0 + n):(10000 + n)]:
        mos_ids_f = mos_ids[(0 + n):(10000 + n)]
        # NOTE(review): assumes values_list yields items with an .id
        # attribute for managed_object -- confirm against the ORM in use
        s_iface = {"%d.%s" % (mo.id, name) for mo, name in iface}
        of = ObjectFact.objects.filter(object__in=mos_ids_f,
                                       cls="subinterface",
                                       attrs__traffic_control_broadcast=False)
        a_f = {".".join((str(o.object.id), o.attrs["name"])) for o in of}
        res.extend(a_f.intersection(s_iface))
        n += 10000
    for s in res:
        # NOTE(review): breaks when an interface name itself contains "." --
        # a split(".", 1) would be safer; verify interface naming
        mo, iface = s.split(".")
        mo = ManagedObject.get_by_id(mo)
        data += [(mo.name, mo.address, mo.profile.name, iface)]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("SA Profile"),
            _("Interface"),
        ],
        data=data,
    )
def handler(self, object_id, revision):
    """
    Return an object's configuration: a specific revision when given,
    otherwise the current one.

    :param object_id: Managed object id
    :param revision: Optional revision identifier
    :return: (status, payload) tuple; 204 when no config is stored
    """
    managed_object = ManagedObject.get_by_id(int(object_id))
    if not managed_object:
        return 404, "Not Found"
    if not revision:
        config = managed_object.config.read()
    else:
        if not managed_object.config.has_revision(revision):
            return 404, "Revision not found"
        config = managed_object.config.get_revision(revision)
    if config is None:
        return 204, ""
    return 200, config
async def on_event(self, msg: Message):
    """
    Handle an incoming raw event message: decode it, resolve the
    managed object, apply ignore rules and classify the event.
    """
    # Decode message
    event = orjson.loads(msg.value)
    object = event.get("object")
    data = event.get("data")
    # Process event
    event_ts = datetime.datetime.fromtimestamp(event.get("ts"))
    # Generate or reuse existing object id
    event_id = ObjectId(event.get("id"))
    # Calculate message processing delay (msg.timestamp is in nanoseconds)
    lag = (time.time() - float(msg.timestamp) / NS) * 1000
    metrics["lag_us"] = int(lag * 1000)
    self.logger.debug("[%s] Receiving new event: %s (Lag: %.2fms)", event_id, data, lag)
    metrics[CR_PROCESSED] += 1
    # Resolve managed object
    mo = ManagedObject.get_by_id(object)
    if not mo:
        self.logger.info("[%s] Unknown managed object id %s. Skipping", event_id, object)
        metrics[CR_UOBJECT] += 1
        return
    self.logger.info("[%s|%s|%s] Managed object found", event_id, mo.name, mo.address)
    # Process event
    source = data.pop("source", "other")
    event = ActiveEvent(
        id=event_id,
        timestamp=event_ts,
        start_timestamp=event_ts,
        managed_object=mo,
        source=source,
        repeats=1,
    )  # raw_vars will be filled by classify_event()
    # Ignore event
    if self.patternset.find_ignore_rule(event, data):
        self.logger.debug("Ignored event %s vars %s", event, data)
        metrics[CR_IGNORED] += 1
        return
    # Classify event
    try:
        await self.classify_event(event, data)
    except Exception as e:
        self.logger.error("[%s|%s|%s] Failed to process event: %s",
                          event.id, mo.name, mo.address, e)
        metrics[CR_FAILED] += 1
        return
    self.logger.info("[%s|%s|%s] Event processed successfully", event.id, mo.name, mo.address)
def on_event(self, message, ts=None, object=None, data=None, id=None, *args, **kwargs): event_ts = datetime.datetime.fromtimestamp(ts) # Generate or reuse existing object id event_id = ObjectId(id) # Calculate messate processing delay lag = (time.time() - ts) * 1000 metrics["lag_us"] = int(lag * 1000) self.logger.debug("[%s] Receiving new event: %s (Lag: %.2fms)", event_id, data, lag) metrics[CR_PROCESSED] += 1 # Resolve managed object mo = ManagedObject.get_by_id(object) if not mo: self.logger.info("[%s] Unknown managed object id %s. Skipping", event_id, object) metrics[CR_UOBJECT] += 1 return True self.logger.info("[%s|%s|%s] Managed object found", event_id, mo.name, mo.address) # Process event source = data.pop("source", "other") event = ActiveEvent( id=event_id, timestamp=event_ts, start_timestamp=event_ts, managed_object=mo, source=source, repeats=1, ) # raw_vars will be filled by classify_event() # Classify event try: self.classify_event(event, data) except Exception as e: self.logger.error("[%s|%s|%s] Failed to process event: %s", event.id, mo.name, mo.address, e) metrics[CR_FAILED] += 1 return False self.logger.info("[%s|%s|%s] Event processed successfully", event.id, mo.name, mo.address) return True
def get_stp_status(object_id):
    """
    Collect spanning-tree status for one object.

    :param object_id: Managed object id
    :return: (object_id, set of root managed objects, set of blocked link ids)
    """
    roots = set()
    blocked = set()
    mo = ManagedObject.get_by_id(object_id)
    result = mo.scripts.get_spanning_tree()
    for instance in result["instances"]:
        # Resolve the STP root bridge to a managed object, if known
        root_object = DiscoveryID.find_object(instance["root_id"])
        if root_object:
            roots.add(root_object)
        for port in instance["interfaces"]:
            # Only alternate ports in discarding state block a link
            if port["state"] != "discarding" or port["role"] != "alternate":
                continue
            iface = mo.get_interface(port["interface"])
            if not iface:
                continue
            link = iface.link
            if link:
                blocked.add(str(link.id))
    return object_id, roots, blocked
def get_connected(self, mo):
    """
    Return managed objects connected to mo
    """
    from noc.sa.models.managedobject import ManagedObject

    mo_id = mo.id if hasattr(mo, "id") else mo
    connected = set()
    # Links where mo is either endpoint; pick the opposite side
    for link in ExtNRILink.objects.filter(Q(src_mo=mo_id) | Q(dst_mo=mo_id)):
        other_id = link.dst_mo if link.src_mo == mo_id else link.src_mo
        other = ManagedObject.get_by_id(other_id)
        if other:
            connected.add(other)
    return connected
def find_all_objects(cls, mac):
    """
    Find objects for mac
    :return: dict of ManagedObjects ID for resolved MAC
    """
    if not mac:
        return []
    metrics["discoveryid_mac_requests"] += 1
    cursor = DiscoveryID._get_collection().find(
        {"macs": int(MAC(mac))},
        {"_id": 0, "object": 1, "chassis_mac": 1},
    )
    # Resolve each matching record, dropping deleted objects
    candidates = (ManagedObject.get_by_id(doc["object"]) for doc in cursor)
    return [mo.id for mo in candidates if mo]
def find_objects(cls, macs):
    """
    Find objects for list of macs

    Fix: removed a dead loop which counted ``obj_ranges`` entries into
    an unused local (``n``).

    :param macs: List of MAC addresses
    :return: dict of MAC -> ManagedObject for resolved MACs
    """
    r = {}
    if not macs:
        return r
    # Build sorted list of macs to search
    mlist = sorted(int(MAC(m)) for m in macs)
    # Search for macs; collect chassis mac ranges: (first, last) -> mo
    obj_ranges = {}
    for d in DiscoveryID._get_collection().find(
        {"macs": {"$in": mlist}},
        {"_id": 0, "object": 1, "chassis_mac": 1},
    ):
        mo = ManagedObject.get_by_id(d["object"])
        if mo:
            for dd in d.get("chassis_mac", []):
                obj_ranges[int(MAC(dd["first_mac"])), int(MAC(dd["last_mac"]))] = mo
    # Resolve ranges against the sorted mac list
    start = 0
    ll = len(mlist)
    for s, e in sorted(obj_ranges):
        mo = obj_ranges[s, e]
        # First mac in mlist which can fall into [s, e]
        start = bisect.bisect_left(mlist, s, start, ll)
        while start < ll and s <= mlist[start] <= e:
            r[MAC(mlist[start])] = mo
            start += 1
    return r
def get_data(self, request, pool=None, obj_profile=None, selector=None, avail_status=None,
             profile_check_only=None, failed_scripts_only=None, filter_pending_links=None,
             filter_none_objects=None, filter_view_other=None, **kwargs):
    """
    Build the discovery-problems report: per-object, per-method
    discovery errors, with optional filtering by problem class.
    """
    data = []
    match = None
    # Remote error code -> human-readable description
    code_map = {
        "1": "Unknown error",
        "10000": "Unspecified CLI error",
        "10005": "Connection refused",
        "10001": "Authentication failed",
        "10002": "No super command defined",
        "10003": "No super privileges",
        "10004": "SSH Protocol error",
    }
    if not pool:
        pool = Pool.objects.filter()[0]
    data += [SectionRow(name="Report by %s" % pool.name)]
    if selector:
        mos = ManagedObject.objects.filter(selector.Q)
    else:
        mos = ManagedObject.objects.filter(pool=pool, is_managed=True)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        mos = mos.filter(object_profile=obj_profile)
    if filter_view_other:
        # Restrict to Generic-profile objects whose profile has ping enabled
        mnp_in = list(ManagedObjectProfile.objects.filter(enable_ping=False))
        mos = mos.filter(profile=Profile.objects.get(name=GENERIC_PROFILE)).exclude(object_profile__in=mnp_in)
    # NOTE(review): this assignment appears unused -- the loop variable
    # below rebinds "discovery"
    discovery = "noc.services.discovery.jobs.box.job.BoxDiscoveryJob"
    mos_id = list(mos.values_list("id", flat=True))
    if avail_status:
        # NOTE(review): "avail" is never read -- availability filtering
        # is delegated to ReportDiscoveryProblem(avail_only=...)
        avail = ObjectStatus.get_statuses(mos_id)
    # Build the mongo match expression for the requested problem class
    if profile_check_only:
        match = {
            "$or": [
                {"job.problems.suggest_cli": {"$exists": True}},
                {"job.problems.suggest_snmp": {"$exists": True}},
                {"job.problems.profile.": {"$regex": "Cannot detect profile"}},
                {"job.problems.version.": {"$regex": "Remote error code 1000[1234]"}},
            ]
        }
    elif failed_scripts_only:
        match = {
            "$and": [
                {"job.problems": {"$exists": "true", "$ne": {}}},
                {"job.problems.suggest_snmp": {"$exists": False}},
                {"job.problems.suggest_cli": {"$exists": False}},
            ]
        }
    elif filter_view_other:
        match = {"job.problems.suggest_snmp": {"$exists": False}}
    rdp = ReportDiscoveryProblem(mos, avail_only=avail_status, match=match)
    exclude_method = []
    if filter_pending_links:
        # Link discovery methods often pending, excluded on request
        exclude_method += ["lldp", "lacp", "cdp", "huawei_ndp"]
    for discovery in rdp:
        mo = ManagedObject.get_by_id(discovery["key"])
        for method in ifilterfalse(lambda x: x in exclude_method, discovery["job"][0]["problems"]):
            problem = discovery["job"][0]["problems"][method]
            if filter_none_objects and not problem:
                continue
            if isinstance(problem, dict) and "" in problem:
                problem = problem.get("", "")
            if "Remote error code" in problem:
                # Translate numeric remote error code to description
                problem = code_map.get(problem.split(" ")[-1], problem)
            if isinstance(problem, six.string_types):
                problem = problem.replace("\n", " ").replace("\r", " ")
            data += [(
                mo.name,
                mo.address,
                mo.profile.name,
                mo.administrative_domain.name,
                _("Yes") if mo.get_status() else _("No"),
                discovery["st"].strftime("%d.%m.%Y %H:%M") if "st" in discovery else "",
                method,
                problem,
            )]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Profile"),
            _("Administrative Domain"),
            _("Avail"),
            _("Last successful discovery"),
            _("Discovery"),
            _("Error"),
        ],
        data=data)
def get_object_data(self, object_id):
    """
    Worker to resolve credentials

    Fetches the object's attributes from Postgres, applies auth-profile
    overrides and profile-level fallbacks, and returns the profile name,
    pool, credentials, capabilities and version information.

    :param object_id: Managed object id
    :raises APIError: when the object is missing or not managed
    """
    object_id = int(object_id)
    # Get Object's attributes
    with self.service.get_pg_connect() as connection:
        cursor = connection.cursor()
        cursor.execute(self.RUN_SQL, [object_id, object_id])
        data = cursor.fetchall()
    if not data:
        metrics["error", ("type", "object_not_found")] += 1
        raise APIError("Object is not found")
    # Build capabilities
    capabilities = ObjectCapabilities.get_capabilities(object_id)
    # Get object credentials (column order must match RUN_SQL)
    (
        name,
        is_managed,
        profile,
        vendor,
        platform,
        version,
        scheme,
        address,
        port,
        user,
        password,
        super_password,
        remote_path,
        snmp_ro,
        pool_id,
        sw_image,
        auth_profile_id,
        ap_user,
        ap_password,
        ap_super_password,
        ap_snmp_ro,
        ap_snmp_rw,
        privilege_policy,
        snmp_rate_limit,
        p_privilege_policy,
        p_snmp_rate_limit,
        access_preference,
        p_access_preference,
        beef_storage_id,
        beef_path_template_id,
        attrs,
    ) = data[0]
    # Check object is managed
    if not is_managed:
        metrics["error", ("type", "object_not_managed")] += 1
        raise APIError("Object is not managed")
    # Auth profile overrides object-level credentials
    if auth_profile_id:
        user = ap_user
        password = ap_password
        super_password = ap_super_password
        snmp_ro = ap_snmp_ro
        snmp_rw = ap_snmp_rw  # noqa just to be
    #
    if privilege_policy == "E":
        raise_privileges = True
    elif privilege_policy == "P":
        # "P" -- inherit the policy from the object profile
        raise_privileges = p_privilege_policy == "E"
    else:
        raise_privileges = False
    # "P" -- inherit access preference from the object profile
    if access_preference == "P":
        access_preference = p_access_preference
    if not snmp_rate_limit:
        snmp_rate_limit = p_snmp_rate_limit
    # Build credentials
    credentials = {
        "name": name,
        "address": address,
        "user": user,
        "password": password,
        "super_password": super_password,
        "path": remote_path,
        "raise_privileges": raise_privileges,
        "access_preference": access_preference,
        "snmp_rate_limit": snmp_rate_limit,
    }
    if snmp_ro:
        credentials["snmp_ro"] = snmp_ro
        if capabilities.get("SNMP | v2c"):
            credentials["snmp_version"] = "v2c"
        elif capabilities.get("SNMP | v1"):
            credentials["snmp_version"] = "v1"
    if scheme in CLI_PROTOCOLS:
        credentials["cli_protocol"] = PROTOCOLS[scheme]
        if port:
            credentials["cli_port"] = port
    elif scheme in HTTP_PROTOCOLS:
        credentials["http_protocol"] = PROTOCOLS[scheme]
        if port:
            credentials["http_port"] = port
    # Build version
    if vendor and platform and version:
        vendor = Vendor.get_by_id(vendor)
        version = {
            "vendor": vendor.code[0] if vendor.code else vendor.name,
            "platform": Platform.get_by_id(platform).name,
            "version": Firmware.get_by_id(version).version,
        }
        if sw_image:
            version["image"] = sw_image
        if attrs:
            version["attributes"] = attrs
    else:
        version = None
    # Beef processing: resolve beef storage URL and rendered path
    if scheme == BEEF and beef_storage_id and beef_path_template_id:
        mo = ManagedObject.get_by_id(object_id)
        tpl = Template.get_by_id(beef_path_template_id)
        beef_path = tpl.render_subject(object=mo)
        if beef_path:
            storage = ExtStorage.get_by_id(beef_storage_id)
            credentials["beef_storage_url"] = storage.url
            credentials["beef_path"] = beef_path
    return dict(
        profile=Profile.get_by_id(profile).name,
        pool_id=pool_id,
        credentials=credentials,
        capabilities=capabilities,
        version=version,
    )
def extract(self, *args, **options):
    """
    Extract closed alarms into the BI alarm stream, enriching each
    record with the number of reboots around the alarm interval.

    :return: Number of records pushed
    """
    nr = 0
    # Get reboots grouped per object, sorted by timestamp
    r = Reboot._get_collection().aggregate([
        {
            "$match": {
                "ts": {"$gt": self.start - self.reboot_interval, "$lte": self.stop},
            }
        },
        {"$sort": {"ts": 1}},
        {"$group": {"_id": "$object", "reboots": {"$push": "$ts"}}},
    ])
    # object -> [ts1, .., tsN]
    reboots = {d["_id"]: d["reboots"] for d in r}
    #
    for d in self.iter_data():
        mo = ManagedObject.get_by_id(d["managed_object"])
        if not mo:
            continue
        # Process reboot data: count reboots within the padded alarm window
        o_reboots = reboots.get(d["managed_object"], [])
        n_reboots = hits_in_range(o_reboots,
                                  d["timestamp"] - self.reboot_interval,
                                  d["clear_timestamp"])
        #
        self.alarm_stream.push(
            ts=d["timestamp"],
            close_ts=d["clear_timestamp"],
            duration=max(0, int((d["clear_timestamp"] - d["timestamp"]).total_seconds())),
            alarm_id=str(d["_id"]),
            root=str(d.get("root") or ""),
            rca_type=d.get("rca_type") or 0,
            alarm_class=AlarmClass.get_by_id(d["alarm_class"]),
            severity=d["severity"],
            reopens=d.get("reopens") or 0,
            direct_services=sum(ss["summary"] for ss in d.get("direct_services", [])),
            direct_subscribers=sum(ss["summary"] for ss in d.get("direct_subscribers", [])),
            total_objects=sum(ss["summary"] for ss in d.get("total_objects", [])),
            total_services=sum(ss["summary"] for ss in d.get("total_services", [])),
            total_subscribers=sum(ss["summary"] for ss in d.get("total_subscribers", [])),
            escalation_ts=d.get("escalation_ts"),
            escalation_tt=d.get("escalation_tt"),
            managed_object=mo,
            pool=mo.pool,
            ip=mo.address,
            profile=mo.profile,
            object_profile=mo.object_profile,
            vendor=mo.vendor,
            platform=mo.platform,
            version=mo.version,
            administrative_domain=mo.administrative_domain,
            segment=mo.segment,
            container=mo.container,
            x=mo.x,
            y=mo.y,
            reboots=n_reboots,
            services=[{
                "profile": ServiceProfile.get_by_id(ss["profile"]).bi_id,
                "summary": ss["summary"],
            } for ss in d.get("direct_services", [])],
            subscribers=[{
                "profile": SubscriberProfile.get_by_id(ss["profile"]).bi_id,
                "summary": ss["summary"],
            } for ss in d.get("direct_subscribers", [])],
            # location=mo.container.get_address_text()
            ack_user=d.get("ack_user", ""),
            ack_ts=d.get("ack_ts"),
        )
        nr += 1
        self.last_ts = d["clear_timestamp"]
    self.alarm_stream.finish()
    return nr
def handle_apply(self, *args, **options):
    """
    Link NOC interfaces according to unlinked ExtNRILink records.

    For each record: resolve both endpoints, map NRI port names to
    local interface names via per-profile port mappers, then link
    point-to-point interfaces. Warnings/errors are written back into
    the collection via a bulk write.

    Fix: the BulkWriteError branch called ``self.stdout.write`` with an
    extra positional argument (logger-style), raising TypeError instead
    of printing -- the message is now %-interpolated.
    """
    connect()
    self.mo_cache = {}
    self.mc_cache = {}
    self.bulk = []
    self.collection = ExtNRILink._get_collection()
    self.stdout.write("Apply NRI links from %s\n" % ExtNRILink._meta["collection"])
    for l in ExtNRILink.objects.filter(link__exists=False):
        # Get objects
        src_mo = ManagedObject.get_by_id(l.src_mo)
        if not src_mo or src_mo.profile.is_generic:
            continue
        dst_mo = ManagedObject.get_by_id(l.dst_mo)
        if not dst_mo or dst_mo.profile.is_generic:
            continue
        #
        if src_mo.id == dst_mo.id:
            self.update_warn(l.id, "Loop link")
            continue
        # Get port mappers
        src_pm = self.get_port_mapper(src_mo)
        if not src_pm:
            self.update_warn(
                l.id,
                "No port mapper for %s (%s)" % (src_mo.name, src_mo.platform or src_mo.profile.name),
            )
            continue
        dst_pm = self.get_port_mapper(dst_mo)
        if not dst_pm:
            self.update_warn(
                l.id,
                "No port mapper for %s (%s)" % (dst_mo.name, dst_mo.platform or dst_mo.profile.name),
            )
            continue
        # Map interfaces
        src_ifname = src_pm(src_mo).to_local(l.src_interface)
        if not src_ifname:
            self.update_warn(
                l.id,
                "Cannot map interface %s for %s (%s)"
                % (l.src_interface, src_mo.name, src_mo.platform or src_mo.profile.name),
            )
            continue
        dst_ifname = dst_pm(dst_mo).to_local(l.dst_interface)
        if not dst_ifname:
            self.update_warn(
                l.id,
                "Cannot map interface %s for %s (%s)"
                % (l.dst_interface, dst_mo.name, dst_mo.platform or dst_mo.profile.name),
            )
            continue
        # Find interfaces in NOC's inventory
        src_iface = self.get_interface(src_mo, src_ifname)
        if not src_iface:
            self.update_warn(l.id, "Interface not found %s@%s\n" % (src_mo.name, src_ifname))
            continue
        dst_iface = self.get_interface(dst_mo, dst_ifname)
        if not dst_iface:
            self.update_warn(l.id, "Interface not found %s@%s\n" % (dst_mo.name, dst_ifname))
            continue
        #
        src_link = src_iface.link
        dst_link = dst_iface.link
        if not src_link and not dst_link:
            # Neither side linked yet -- create the link
            self.stdout.write(
                "%s: %s -- %s: %s: Linking\n" % (src_mo.name, src_ifname, dst_mo.name, dst_ifname))
            src_link = src_iface.link_ptp(dst_iface, method="nri")
            self.update_nri(l.id, link=src_link.id)
        elif src_link and dst_link and src_link.id == dst_link.id:
            # Both sides already share the same link -- just record it
            self.stdout.write(
                "%s: %s -- %s: %s: Already linked\n"
                % (src_mo.name, src_ifname, dst_mo.name, dst_ifname))
            self.update_nri(l.id, link=src_link.id)
        elif src_link and not dst_link:
            self.update_error(l.id, "Linked to: %s" % src_link)
        elif src_link is None and dst_link:
            self.update_error(l.id, "Linked to: %s" % dst_link)
    if self.bulk:
        self.stdout.write("Commiting changes to database\n")
        try:
            self.collection.bulk_write(self.bulk)
            self.stdout.write("Database has been synced\n")
        except BulkWriteError as e:
            # stdout.write() accepts a single string -- interpolate the details
            self.stdout.write("Bulk write error: '%s'\n" % e.details)
    else:
        self.stdout.write("Nothing changed\n")
def get_object_and_interface(self, object=None, interface=None, service=None):
    # type: (Optional[Dict[str, Any]], Optional[Dict[str, Any]], Optional[Dict[str, Any]]) -> Tuple[ManagedObject, Optional[Interface]]
    """
    Process from and to section of request and get object and interface

    Fixes:
    * service-by-id lookup used ``filter("id")`` (a bare positional
      string) and never matched -- now filters on ``id=service["id"]``.
    * service-by-remote-system lookup read ``object["remote_system"]``,
      but ``object`` is absent on this branch -- now reads from
      ``service``.

    :param object: request.object -- {"id": ...} or {"remote_system": ..., "remote_id": ...}
    :param interface: request.interface -- {"id": ...}, or {"name": ...} together with object
    :param service: request.service -- {"id": ...} or {"remote_system": ..., "remote_id": ...}
    :return: ManagedObject Instance, Optional[Interface Instance]
    :raises ValueError: when the referenced entity cannot be resolved
    """
    if object:
        if "id" in object:
            # object.id
            mo = ManagedObject.get_by_id(object["id"])
        elif "remote_system" in object:
            # object.remote_system/remote_id
            rs = RemoteSystem.get_by_id(object["remote_system"])
            if not rs:
                raise ValueError("Remote System not found")
            mo = ManagedObject.objects.filter(
                remote_system=rs.id, remote_id=object["remote_id"]).first()
        else:
            raise ValueError("Neither id or remote system specified")
        if not mo:
            raise ValueError("Object not found")
        if interface:
            # Additional interface restriction
            iface = mo.get_interface(interface["name"])
            if iface is None:
                raise ValueError("Interface not found")
            return mo, iface
        # No interface restriction
        return mo, None
    if interface:
        iface = Interface.objects.filter(id=interface["id"]).first()
        if not iface:
            raise ValueError("Interface not found")
        return iface.managed_object, iface
    if service:
        if "id" in service:
            # Fix: filter by the actual service id value
            svc = Service.objects.filter(id=service["id"]).first()
        elif "remote_system" in service:
            # Fix: resolve remote system from the service payload
            rs = RemoteSystem.get_by_id(service["remote_system"])
            if not rs:
                raise ValueError("Remote System not found")
            svc = Service.objects.filter(
                remote_system=rs.id, remote_id=service["remote_id"]).first()
        else:
            raise ValueError("Neither id or remote system specified")
        if svc is None:
            raise ValueError("Service not found")
        iface = Interface.objects.filter(service=svc.id).first()
        if not iface:
            raise ValueError("Interface not found")
        return iface.managed_object, iface
    raise ValueError("Invalid search condition")