def api_links(self, request, id):
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    # Get links
    result = []
    for link in Link.object_links(o):
        ifaces = []
        r = []
        for i in link.interfaces:
            if i.managed_object.id == o.id:
                ifaces += [i]
            else:
                r += [i]
        for li, ri in zip(ifaces, r):
            result += [
                {
                    "link_id": str(link.id),
                    "local_interface": str(li.id),
                    "local_interface__label": li.name,
                    "remote_object": ri.managed_object.id,
                    "remote_object__label": ri.managed_object.name,
                    "remote_platform": ri.managed_object.platform.name
                    if ri.managed_object.platform
                    else "",
                    "remote_interface": str(ri.id),
                    "remote_interface__label": ri.name,
                    "discovery_method": link.discovery_method,
                    "local_description": li.description,
                    "remote_description": ri.description,
                    "first_discovered": link.first_discovered.isoformat()
                    if link.first_discovered
                    else None,
                    "last_seen": link.last_seen.isoformat() if link.last_seen else None,
                }
            ]
    return result
def api_links(self, request, id):
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    # Get links
    result = []
    for link in Link.object_links(o):
        l = []
        r = []
        for i in link.interfaces:
            if i.managed_object.id == o.id:
                l += [i]
            else:
                r += [i]
        for li, ri in zip(l, r):
            result += [
                {
                    "id": str(link.id),
                    "local_interface": str(li.id),
                    "local_interface__label": li.name,
                    "remote_object": ri.managed_object.id,
                    "remote_object__label": ri.managed_object.name,
                    "remote_interface": str(ri.id),
                    "remote_interface__label": ri.name,
                    "discovery_method": link.discovery_method,
                    "commited": True,
                    "local_description": li.description,
                    "remote_description": ri.description,
                }
            ]
    # Get pending links
    q = MQ(local_object=o.id) | MQ(remote_object=o.id)
    for link in PendingLinkCheck.objects.filter(q):
        if link.local_object.id == o.id:
            ro = link.remote_object
            lin = link.local_interface
            rin = link.remote_interface
        else:
            ro = link.local_object
            lin = link.remote_interface
            rin = link.local_interface
        li = Interface.objects.filter(managed_object=o.id, name=lin).first()
        if not li:
            continue
        ri = Interface.objects.filter(managed_object=ro.id, name=rin).first()
        if not ri:
            continue
        result += [
            {
                "id": str(link.id),
                "local_interface": str(li.id),
                "local_interface__label": li.name,
                "remote_object": ro.id,
                "remote_object__label": ro.name,
                "remote_interface": str(ri.id),
                "remote_interface__label": ri.name,
                "discovery_method": link.method,
                "commited": False,
                "local_description": li.description,
                "remote_description": ri.description,
            }
        ]
    return result
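# Both api_links variants above split a link's interfaces into a local and a remote
# list and then pair them positionally with zip(). The standalone sketch below
# isolates that pairing; the SimpleNamespace objects stand in for NOC's Interface
# and ManagedObject documents and are purely illustrative.
from types import SimpleNamespace


def pair_link_interfaces(link_interfaces, local_object_id):
    """Split a link's interfaces into local/remote and pair them positionally."""
    local, remote = [], []
    for iface in link_interfaces:
        if iface.managed_object.id == local_object_id:
            local.append(iface)
        else:
            remote.append(iface)
    # zip() silently truncates to the shorter side, as in the handlers above
    return list(zip(local, remote))


mo1 = SimpleNamespace(id=1, name="sw-access-1")
mo2 = SimpleNamespace(id=2, name="sw-core-1")
ifaces = [
    SimpleNamespace(name="Gi0/1", managed_object=mo1),
    SimpleNamespace(name="Te1/0/1", managed_object=mo2),
]
for li, ri in pair_link_interfaces(ifaces, local_object_id=1):
    print("%s <-> %s (%s)" % (li.name, ri.name, ri.managed_object.name))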
def iter_interfaces_meta(self):
    # Get all interfaces
    ifaces = {
        iface.name: iface
        for iface in Interface.objects.filter(managed_object=self.object.id)
    }
    own_ifaces = set(ifaces[iface].id for iface in ifaces)
    # Get all links
    links = {}  # interface -> object -> [remote_interface, ...]
    for link in Link.object_links(self.object):
        local_interfaces = set()
        remote_interfaces = {}  # object -> [interfaces]
        for i in link.interfaces:
            if i.id in own_ifaces:
                local_interfaces.add(i.name)
            else:
                if i.managed_object not in remote_interfaces:
                    remote_interfaces[i.managed_object] = []
                remote_interfaces[i.managed_object] += [i.name]
        for li in local_interfaces:
            links[li] = remote_interfaces
    # Yield meta for all interfaces
    for ctx in self.confdb.query("Match('interfaces', ifname)"):
        iface = ifaces.get(ctx["ifname"])
        if not iface:
            continue
        # interfaces X meta profile
        if iface.profile:
            yield "interfaces", iface.name, "meta", "profile", "id", str(iface.profile.id)
            yield "interfaces", iface.name, "meta", "profile", "name", iface.profile.name
        if iface.ifindex is not None:
            yield "interfaces", iface.name, "meta", "ifindex", int(iface.ifindex)
        if iface.mac:
            yield "interfaces", iface.name, "meta", "mac", str(iface.mac)
        # interfaces X meta hints
        if iface.hints:
            for hint in iface.hints:
                yield "interfaces", iface.name, "meta", "hints", hint
        # interfaces X meta link
        if iface.name in links:
            for n, ro in enumerate(sorted(links[iface.name], key=str)):
                n = str(n)
                yield "interfaces", iface.name, "meta", "link", n, "object", "id", str(ro.id)
                yield "interfaces", iface.name, "meta", "link", n, "object", "name", ro.name
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "id", str(
                    ro.object_profile.id
                )
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "name", ro.object_profile.name
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "level", ro.object_profile.level
                for ri in sorted(links[iface.name][ro]):
                    yield "interfaces", iface.name, "meta", "link", n, "interface", ri
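# iter_interfaces_meta above yields ConfDB meta paths as flat tuples. The small
# sketch below shows one way such tuples could be folded back into a nested dict
# for inspection. insert_path() is a hypothetical helper for illustration only,
# not part of NOC's ConfDB API, and the sample tuples are made-up values.
def insert_path(tree, path):
    """Nest a flat path tuple into a plain dict; the last element is the value."""
    node = tree
    for part in path[:-2]:
        node = node.setdefault(str(part), {})
    node[str(path[-2])] = path[-1]
    return tree


yielded = [
    ("interfaces", "Gi0/1", "meta", "profile", "id", "5e2a0f"),
    ("interfaces", "Gi0/1", "meta", "profile", "name", "Access"),
    ("interfaces", "Gi0/1", "meta", "ifindex", 10101),
]
tree = {}
for path in yielded:
    insert_path(tree, path)
print(tree)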
def load_existing_links(self, object):
    for link in Link.object_links(object):
        if link.is_ptp:
            i1, i2 = link.interfaces
            if link.is_loop:
                # Loop to self
                self.submited.add((i1.name, object, i2.name))
                self.submited.add((i2.name, object, i1.name))
            else:
                # p2p link
                if i1.managed_object == object:
                    self.submited.add((i1.name, i2.managed_object, i2.name))
                else:
                    self.submited.add((i2.name, i1.managed_object, i1.name))
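# load_existing_links above records already-known links as
# (local_ifname, remote_object, remote_ifname) triples, presumably so a later
# submit step can skip them. The sketch below illustrates that dedupe idea;
# LinkSubmitter and submit_link are hypothetical names, and the "submited"
# spelling simply follows the attribute used above.
class LinkSubmitter(object):
    def __init__(self):
        self.submited = set()

    def submit_link(self, local_ifname, remote_object, remote_ifname):
        key = (local_ifname, remote_object, remote_ifname)
        if key in self.submited:
            return False  # already known, skip
        self.submited.add(key)
        return True


s = LinkSubmitter()
print(s.submit_link("Gi0/1", "sw-core-1", "Te1/0/1"))  # True, newly submitted
print(s.submit_link("Gi0/1", "sw-core-1", "Te1/0/1"))  # False, duplicate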
def api_discovery(self, request, id):
    from noc.core.scheduler.job import Job

    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    link_count = defaultdict(int)
    for link in Link.object_links(o):
        m = link.discovery_method or ""
        if "+" in m:
            m = m.split("+")[0]
        link_count[m] += 1
    r = [
        {
            "name": "ping",
            "enable_profile": o.object_profile.enable_ping,
            "status": o.get_status(),
            "last_run": None,
            "last_status": None,
            "next_run": None,
            "jcls": None,
        }
    ]
    for name, jcls in self.DISCOVERY_JOBS:
        job = Job.get_job_data("discovery", jcls=jcls, key=o.id, pool=o.pool.name) or {}
        d = {
            "name": name,
            "enable_profile": getattr(o.object_profile, "enable_%s_discovery" % name),
            "status": job.get(Job.ATTR_STATUS),
            "last_run": self.to_json(job.get(Job.ATTR_LAST)),
            "last_status": job.get(Job.ATTR_LAST_STATUS),
            "next_run": self.to_json(job.get(Job.ATTR_TS)),
            "jcls": jcls,
        }
        r += [d]
    return r
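# Both api_discovery variants count links per discovery method, crediting combined
# methods such as "lldp+cdp" to their first component and folding missing methods
# into an empty-string bucket. The isolated, runnable sketch below shows only that
# counting logic; the method strings are example values.
from collections import defaultdict


def count_links_by_method(discovery_methods):
    link_count = defaultdict(int)
    for m in discovery_methods:
        m = m or ""
        if "+" in m:
            m = m.split("+")[0]
        link_count[m] += 1
    return dict(link_count)


print(count_links_by_method(["lldp", "lldp+cdp", "cdp", None]))
# -> {'lldp': 2, 'cdp': 1, '': 1}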
def handle_split_floating(self, profile, ids, *args, **options):
    connect()
    p = NetworkSegmentProfile.objects.filter(name=profile).first()
    if not p:
        self.die("Profile not found")
    if p.is_persistent:
        self.die("Segment profile cannot be persistent")
    for seg_id in ids:
        seg = NetworkSegment.get_by_id(seg_id)
        if not seg:
            self.print("@@@ %s - not found. Skipping" % seg_id)
            continue
        self.print("@@@ Splitting %s (%s)" % (seg.name, seg_id))
        objects = list(ManagedObject.objects.filter(is_managed=True, segment=seg_id))
        for mo in objects:
            new_segment = NetworkSegment(
                name=mo.administrative_domain.get_bioseg_floating_name(mo)
                or "Bubble for %s" % mo.name,
                profile=p,
                parent=mo.administrative_domain.get_bioseg_floating_parent_segment(),
            )
            new_segment.save()
            self.print(" Moving '%s' to segment '%s'" % (mo.name, new_segment.name))
            mo.segment = new_segment
            mo.save()
        # Establish trials
        self.print("@@@ Scheduling trials")
        for mo in objects:
            for link in Link.object_links(mo):
                for ro in link.managed_objects:
                    if ro == mo:
                        continue
                    self.print(
                        " '%s' challenging '%s' over %s -- %s"
                        % (mo.segment.name, ro.segment.name, mo.name, ro.name)
                    )
                    BioSegTrial.schedule_trial(mo.segment, ro.segment, mo, ro, reason="link")
def handle_reactivate_floating(self, ids, profile=None, allow_persistent=False, *args, **options):
    connect()
    nsp = NetworkSegmentProfile.objects.filter(is_persistent=False)
    if profile:
        nsp = nsp.filter(name=profile)
    if ids:
        ns = NetworkSegment.objects.filter(id__in=ids)
    elif nsp.count() > 0:
        ns = NetworkSegment.objects.filter(profile__in=nsp)
    else:
        self.die("Setting segment filter condition")
    if profile:
        p = NetworkSegmentProfile.objects.get(name=profile)
        ns = ns.filter(profile=p)
    for seg_id in ns.scalar("id"):
        seg = NetworkSegment.get_by_id(seg_id)
        if not seg:
            self.print("@@@ %s - not found. Skipping" % seg_id)
            continue
        self.print("@@@ Reactivating %s (%s)" % (seg.name, seg_id))
        objects = list(ManagedObject.objects.filter(is_managed=True, segment=seg_id))
        # Establish trials
        for mo in objects:
            for link in Link.object_links(mo):
                for ro in link.managed_objects:
                    if ro == mo:
                        continue
                    self.print(
                        " '%s' challenging '%s' over %s -- %s"
                        % (mo.segment.name, ro.segment.name, mo.name, ro.name)
                    )
                    BioSegTrial.schedule_trial(
                        mo.segment,
                        ro.segment,
                        mo,
                        ro,
                        reason="link",
                    )
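# Both bio-segment handlers above enumerate a managed object's neighbors by walking
# its links and skipping the object itself before scheduling a trial. The minimal
# sketch below shows only that enumeration; iter_link_neighbors is a hypothetical
# name and the Link/ManagedObject stand-ins are SimpleNamespace placeholders.
from types import SimpleNamespace


def iter_link_neighbors(mo, object_links):
    """Yield (link, remote_object) pairs for every neighbor reachable over a link."""
    for link in object_links:
        for ro in link.managed_objects:
            if ro == mo:
                continue
            yield link, ro


a = SimpleNamespace(name="mo-a")
b = SimpleNamespace(name="mo-b")
c = SimpleNamespace(name="mo-c")
links = [SimpleNamespace(managed_objects=[a, b]), SimpleNamespace(managed_objects=[a, c])]
for _link, ro in iter_link_neighbors(a, links):
    print("challenge", ro.name)  # -> mo-b, then mo-c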
def api_discovery(self, request, id):
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    link_count = defaultdict(int)
    for link in Link.object_links(o):
        m = link.discovery_method or ""
        if "+" in m:
            m = m.split("+")[0]
        link_count[m] += 1
    r = [
        {
            "name": "ping",
            "enable_profile": o.object_profile.enable_ping,
            "status": o.get_status(),
            "last_run": None,
            "last_status": None,
            "next_run": None,
            "link_count": None,
        }
    ]
    for name in get_active_discovery_methods():
        job = get_job("inv.discovery", name, o.id) or {}
        if name.endswith("_discovery"):
            lcmethod = name[:-10]
        else:
            lcmethod = None
        d = {
            "name": name,
            "enable_profile": getattr(o.object_profile, "enable_%s" % name),
            "status": job.get("s"),
            "last_run": self.to_json(job.get("last")),
            "last_status": job.get("ls"),
            "next_run": self.to_json(job.get("ts")),
            "link_count": link_count.get(lcmethod, ""),
        }
        r += [d]
    return r
def get_data(self):
    def get_container_path(self):
        # Get container path
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime
    alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            uptime = Uptime.objects.filter(object=self.object.id, stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            outage = Outage.objects.filter(object=self.object.id, stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Links
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for l in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        for i in l.interfaces:
            if i.managed_object.id == self.object.id:
                local_interfaces += [i]
            else:
                remote_interfaces += [i]
                remote_objects.add(i.managed_object)
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [
                {
                    "id": l.id,
                    "role": role,
                    "local_interface": sorted(
                        local_interfaces, key=lambda x: split_alnum(x.name)
                    ),
                    "remote_object": ro,
                    "remote_interface": sorted(
                        remote_interfaces, key=lambda x: split_alnum(x.name)
                    ),
                    "remote_status": "up" if ro.get_status() else "down",
                }
            ]
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink", split_alnum(x["local_interface"][0].name)),
    )
    # Build global services summary
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces
    interfaces = []
    metrics_map = [
        "Interface | Load | In",
        "Interface | Load | Out",
        "Interface | Errors | In",
        "Interface | Errors | Out",
    ]
    mo = ManagedObject.objects.filter(id=self.object.id)
    ifaces_metrics, last_ts = get_interface_metrics(mo[0])
    ifaces_metrics = ifaces_metrics[mo[0]]
    objects_metrics, last_time = get_objects_metrics(mo[0])
    objects_metrics = objects_metrics.get(mo[0])
    meta = {}
    metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))
    if objects_metrics:
        for path, mres in six.iteritems(objects_metrics):
            for key in mres:
                metric_name = "%s | %s" % (key, path) if any(path.split("|")) else key
                meta[metric_name] = {"type": metric_type_name[key], "value": mres[key]}
    for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in six.iteritems(iface_metrics):
                if key not in metrics_map:
                    continue
                metric_type = metric_type_name.get(key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Load | Out":
                    load_out = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [
            {
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }
        ]
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
    interfaces = sorted(interfaces, key=lambda x: split_alnum(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }
        ]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }
        ]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [
            {
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }
        ]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance
    maintenance = []
    for m in Maintenance.objects.filter(
        affected_objects__object=self.object.id,
        is_completed=False,
        start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [
            {
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now,
            }
        ]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,  # self.object.platform.name if self.object.platform else "Unknown"
        "version": version,  # self.object.version.version if self.object.version else ""
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(
            ManagedObjectAttribute.objects.filter(managed_object=self.object.id)
        ),
        "confdb": self.object.get_confdb(),
    }
    return r
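# A standalone sketch of how get_data above derives per-link roles and the
# redundancy flag. In the report, the uplink ids come from self.object.data.uplinks
# and lost_redundancy from the segment; here the inputs are example values and
# link_roles is a hypothetical helper. The letter codes follow the lost_redundancy
# naming in the source.
def link_roles(remote_object_ids, uplink_ids, lost_redundancy=False):
    roles = {
        ro_id: "uplink" if ro_id in uplink_ids else "downlink"
        for ro_id in remote_object_ids
    }
    if len(uplink_ids) > 1:
        redundancy = "L" if lost_redundancy else "R"  # "L" when redundancy is lost
    else:
        redundancy = "N"  # single (or no) uplink: no redundancy to lose
    return roles, redundancy


print(link_roles([10, 11, 12], uplink_ids={10, 11}))
# -> ({10: 'uplink', 11: 'uplink', 12: 'downlink'}, 'R')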
def get_data(self):
    intervals = (
        ("y", 31557617),  # ~1 year (365.25 days)
        ("w", 604800),  # 60 * 60 * 24 * 7
        ("d", 86400),  # 60 * 60 * 24
        ("h", 3600),  # 60 * 60
        ("m", 60),
        ("s", 1),
    )

    def display_time(seconds):
        result = []
        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip("s")
                result.append("{}{}".format(value, name))
        return ", ".join(result[:-1])

    def sortdict(dct):
        kys = sorted(dct.keys())
        res = OrderedDict()
        for x in kys:
            for k, v in dct.items():
                if k == x:
                    res[k] = v
        return res

    def get_container_path(self):
        # Get container path
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime
    alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            uptime = Uptime.objects.filter(object=self.object.id, stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            outage = Outage.objects.filter(object=self.object.id, stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Hostname
    hostname = ""
    did = DiscoveryID.objects.filter(object=self.object.id).first()
    if did and did.hostname:
        hostname = did.hostname
    # Links
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for _link in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        for iface in _link.interfaces:
            if iface.managed_object.id == self.object.id:
                local_interfaces += [iface]
            else:
                remote_interfaces += [iface]
                remote_objects.add(iface.managed_object)
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [
                {
                    "id": _link.id,
                    "role": role,
                    "local_interface": sorted(local_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_object": ro,
                    "remote_interface": sorted(remote_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_status": "up" if ro.get_status() else "down",
                }
            ]
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink", alnum_key(x["local_interface"][0].name)),
    )
    # Build global services summary
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces
    interfaces = []
    mo = ManagedObject.objects.filter(id=self.object.id)
    mo = mo[0]
    ifaces_metrics, last_ts = get_interface_metrics(mo)
    ifaces_metrics = ifaces_metrics[mo]
    objects_metrics, last_time = get_objects_metrics(mo)
    objects_metrics = objects_metrics.get(mo)
    # Sensors
    sensors_metrics = None
    s_metrics = None
    sensors = {}
    s_meta = []
    STATUS = {0: "OK", 1: "Alarm"}
    metric_map = {}
    if mo.get_caps().get("Sensor | Controller"):
        for mc in MetricType.objects.filter(scope=MetricScope.objects.get(name="Environment")):
            if metric_map:
                metric_map["map"].update({mc.field_name: mc.name})
            else:
                metric_map = {"table_name": mc.scope.table_name, "map": {mc.field_name: mc.name}}
        sensors_metrics, last_ts = get_interface_metrics(mo, metric_map)
        sensors_metrics = sensors_metrics[mo]
    m_tp = {}
    if mo.object_profile.metrics:
        for mt in mo.object_profile.metrics:
            if mt.get("threshold_profile"):
                threshold_profile = ThresholdProfile.get_by_id(mt.get("threshold_profile"))
                m_tp[MetricType.get_by_id(mt.get("metric_type")).name] = threshold_profile
    data = {}
    meta = []
    metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))
    if objects_metrics:
        for path, mres in objects_metrics.items():
            t_v = False
            for key in mres:
                m_path = path if any(path.split("|")) else key
                m_path = " | ".join(kk.strip() for kk in m_path.split("|"))
                if m_tp.get(key):
                    t_v = self.get_threshold_config(m_tp.get(key), int(mres[key]))
                val = {
                    "name": m_path,
                    "type": "" if m_path == "Object | SysUptime" else metric_type_name[key],
                    "value": display_time(int(mres[key]))
                    if m_path == "Object | SysUptime"
                    else mres[key],
                    "threshold": t_v,
                }
                if data.get(key):
                    data[key] += [val]
                else:
                    data[key] = [val]
    data = sortdict(data)
    for k, d in data.items():
        collapsed = False
        if len(d) == 1:
            collapsed = True
        for dd in d:
            isdanger = False
            if dd["threshold"]:
                isdanger = True
                collapsed = True
        meta.append({"name": k, "value": d, "collapsed": collapsed, "isdanger": isdanger})
    for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in iface_metrics.items():
                metric_type = metric_type_name.get(key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Load | Out":
                    load_out = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [
            {
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }
        ]
        if sensors_metrics:
            s_metrics = sensors_metrics.get(str(i.name))
            if s_metrics:
                sens_metrics = []
                for i_metrics in i.profile.metrics:
                    sens_metrics.append(i_metrics.metric_type.name)
                for key, value in s_metrics.items():
                    if key not in sens_metrics:
                        continue
                    val = {
                        "name": key,
                        "type": metric_type_name[key],
                        "value": STATUS.get(value) if metric_type_name[key] == " " else value,
                        "threshold": None,
                    }
                    if sensors.get(i.name):
                        sensors[i.name] += [val]
                    else:
                        sensors[i.name] = [val]
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
    if sensors:
        sensors = sortdict(sensors)
        for k, d in sensors.items():
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
            s_meta.append({"name": k, "value": d, "isdanger": isdanger})
    interfaces = sorted(interfaces, key=lambda x: alnum_key(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }
        ]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }
        ]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [
            {
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }
        ]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance
    maintenance = []
    for m in Maintenance.objects.filter(
        affected_objects__object=self.object.id,
        is_completed=False,
        start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [
            {
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now,
            }
        ]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,  # self.object.platform.name if self.object.platform else "Unknown"
        "version": version,  # self.object.version.version if self.object.version else ""
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "hostname": hostname,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "sensors": s_meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(
            ManagedObjectAttribute.objects.filter(managed_object=self.object.id)
        ),
        "confdb": None,
    }
    try:
        r["confdb"] = self.object.get_confdb()
    except (SyntaxError, ValueError):
        pass
    return r
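# The uptime helpers defined at the top of the newer get_data above, exercised in
# isolation. These are lightly condensed versions (the rstrip("s") singular branch
# and the O(n^2) sortdict loop are simplified, with the same results for the
# single-letter unit names used here). Note that display_time() joins result[:-1],
# so the smallest remaining unit, usually seconds, is dropped from the rendering.
from collections import OrderedDict

INTERVALS = (
    ("y", 31557617),
    ("w", 604800),
    ("d", 86400),
    ("h", 3600),
    ("m", 60),
    ("s", 1),
)


def display_time(seconds):
    result = []
    for name, count in INTERVALS:
        value = seconds // count
        if value:
            seconds -= value * count
            result.append("{}{}".format(value, name))
    return ", ".join(result[:-1])


def sortdict(dct):
    return OrderedDict((k, dct[k]) for k in sorted(dct))


print(display_time(90061))  # 1 day, 1 hour, 1 minute, 1 second -> "1d, 1h, 1m"
print(sortdict({"b": 2, "a": 1}))  # OrderedDict([('a', 1), ('b', 2)])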