def get_object_profile_metrics(cls, p_id: int) -> Dict[str, MetricConfig]:
    r = {}
    opr = ManagedObjectProfile.get_by_id(id=p_id)
    if not opr:
        return r
    for m in opr.metrics:
        mt_id = m.get("metric_type")
        if not mt_id:
            continue
        mt = MetricType.get_by_id(mt_id)
        if not mt:
            continue
        if m.get("threshold_profile"):
            threshold_profile = ThresholdProfile.get_by_id(m.get("threshold_profile"))
        else:
            threshold_profile = None
        r[mt.name] = MetricConfig(
            mt,
            m.get("enable_box", True),
            m.get("enable_periodic", True),
            m.get("is_stored", True),
            threshold_profile,
        )
    return r
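# Usage sketch (not from the source): the mapping above is keyed by MetricType name,
# so a caller can check per-metric collection flags. The enclosing class is shown here
# as MetricsCheck for illustration, and the MetricConfig attribute names are assumed to
# mirror the positional arguments (enable_box, enable_periodic, is_stored, threshold_profile).
configs = MetricsCheck.get_object_profile_metrics(profile_id)  # profile_id is illustrative
for name, cfg in configs.items():
    if cfg.enable_box and cfg.is_stored:
        print("collect on box run:", name)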
def instance_to_dict(self, o, fields=None):
    d = super(ManagedObjectProfileApplication, self).instance_to_dict(o, fields=fields)
    if d["metrics"]:
        for m in d["metrics"]:
            mt = MetricType.get_by_id(m["metric_type"])
            if mt:
                m["metric_type__label"] = mt.name
    return d
def handler(self):
    # Decode request
    try:
        req = ujson.loads(self.request.body)
    except ValueError:
        return 400, "Cannot decode JSON"
    # Validate
    try:
        req = Request.clean(req)
    except ValueError as e:
        return 400, "Bad request: %s" % e
    # Group metrics
    data = defaultdict(dict)  # (scope, path, ts) -> field -> value
    bi_id = str(req["bi_id"])
    for metric in req["metrics"]:
        mt = MetricType.get_by_name(metric["metric_type"])
        if not mt:
            self.logger.error("Unknown metric type '%s'. Skipping", metric["metric_type"])
            continue
        table = mt.scope.table_name
        field = mt.field_name
        path = tuple(metric["path"])
        for ts, value in metric["values"]:
            # @todo: Check timestamp
            # @todo: Check value type
            data[table, path, ts][field] = value
    # Prepare to send
    chains = defaultdict(list)
    for (scope, path, ts), values in six.iteritems(data):
        # @todo: Proper key fields
        record_fields = ["%s.date.ts.managed_object" % scope]
        if path:
            record_fields += ["path"]
        fields = sorted(values)
        record_fields += fields
        rf = ".".join(record_fields)
        if isinstance(rf, unicode):
            rf = rf.encode("utf-8")
        # Convert timestamp to CH format
        ts = ts.replace("T", " ")
        date = ts.split()[0]
        # Build record
        record = [date, ts, bi_id]
        if path:
            record += [self.quote_path(path)]
        record += [str(values[f]) for f in fields]
        r = "\t".join(record)
        if isinstance(r, unicode):
            r = r.encode("utf-8")
        chains[rf] += [r]
    # Spool metrics
    for f in chains:
        self.service.register_metrics(f, chains[f])
    return 200, "OK"
def instance_to_dict(self, o, fields=None):
    d = super().instance_to_dict(o, fields=fields)
    if d["metrics"]:
        for m in d["metrics"]:
            mt = MetricType.get_by_id(m["metric_type"])
            if mt:
                m["metric_type__label"] = mt.name
            if m.get("threshold_profile"):
                tp = ThresholdProfile.get_by_id(m["threshold_profile"])
                if tp:  # Guard against a stale threshold profile reference
                    m["threshold_profile__label"] = tp.name
    return d
def get_sensor_metrics(self):
    metrics = []
    o = Object.get_managed(self.object).values_list("id")
    for s in (
        Sensor._get_collection()
        .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
        .find(
            {"object": {"$in": list(o)}, "snmp_oid": {"$exists": True}},
            {"local_id": 1, "profile": 1, "state": 1, "snmp_oid": 1, "labels": 1, "bi_id": 1},
        )
    ):
        if not s.get("profile"):
            # "name" is not in the projection above, so log by local_id
            self.logger.debug("[%s] Sensor has no profile. Skipping", s["local_id"])
            continue
        pm: "SensorProfile" = SensorProfile.get_by_id(s["profile"])
        if not pm.enable_collect:
            continue
        state = State.get_by_id(s["state"])
        if not state.is_productive:
            self.logger.debug("[%s] Sensor is not in productive state. Skipping", s["local_id"])
            continue
        for mtype in ["Sensor | Value", "Sensor | Status"]:
            m_id = next(self.id_count)
            metric = MetricType.get_by_name(mtype)
            labels = [f'noc::sensor::{s["local_id"]}'] + s.get("labels", [])
            metrics += [{"id": m_id, "metric": metric.name, "labels": labels, "oid": s["snmp_oid"]}]
            self.id_metrics[m_id] = MetricConfig(metric, False, True, True, None)
            self.sensors_metrics[m_id] = int(s["bi_id"])
    return metrics
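# Shape sketch (illustrative values only): each item returned by get_sensor_metrics()
# pairs an SNMP OID with a request id; self.id_metrics maps that id to a MetricConfig
# and self.sensors_metrics maps it to the sensor's bi_id.
example_sensor_metric = {
    "id": 7,                           # from self.id_count
    "metric": "Sensor | Status",
    "labels": ["noc::sensor::temp1"],  # noc::sensor::<local_id> plus the sensor's own labels
    "oid": "1.3.6.1.4.1.99999.1.2.0",  # hypothetical OID
}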
def handler(self):
    # Decode request
    try:
        req = orjson.loads(self.request.body)
    except ValueError:
        return 400, "Cannot decode JSON"
    # Validate
    try:
        req = Request.clean(req)
    except ValueError as e:
        return 400, "Bad request: %s" % e
    # Group metrics
    data = defaultdict(dict)  # (scope, path, ts) -> field -> value
    bi_id = str(req["bi_id"])
    for metric in req["metrics"]:
        mt = MetricType.get_by_name(metric["metric_type"])
        if not mt:
            self.logger.error("Unknown metric type '%s'. Skipping", metric["metric_type"])
            continue
        table = mt.scope.table_name
        field = mt.field_name
        path = tuple(metric["path"])
        for ts, value in metric["values"]:
            # @todo: Check timestamp
            # @todo: Check value type
            data[table, path, ts][field] = value
    # Prepare to send
    chains = defaultdict(list)  # table -> metrics
    for (scope, path, ts), values in data.items():
        # Convert timestamp to CH format
        ts = ts.replace("T", " ")
        date = ts.split()[0]
        # Metric record (do not shadow the outer `data` mapping)
        record = values  # Values first to protect critical fields
        record.update({"date": date, "ts": ts, "managed_object": bi_id})
        if path:
            record["path"] = [str(x) for x in path]
        chains[scope] += [record]
    # Spool metrics
    for f in chains:
        self.service.register_metrics(f, chains[f])
    return 200, "OK"
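# Request sketch for the two handler() variants above (the ujson and orjson versions
# parse the same structure): the managed object's bi_id plus a list of metric series,
# each with a metric type name, a path and [timestamp, value] pairs. All literal values
# here are illustrative, not taken from the source.
example_request = {
    "bi_id": 1234567890,
    "metrics": [
        {
            "metric_type": "Interface | Load | In",
            "path": ["", "", "", "Gi 0/1"],
            "values": [
                ["2021-01-01T10:00:00", 1000000],
                ["2021-01-01T10:05:00", 1250000],
            ],
        }
    ],
}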
def get_object_profile_metrics(cls, p_id):
    r = {}
    opr = ManagedObjectProfile.get_by_id(id=p_id)
    if not opr:
        return r
    for m in opr.metrics:
        mt_id = m.get("metric_type")
        if not mt_id:
            continue
        mt = MetricType.get_by_id(mt_id)
        if not mt:
            continue
        le = m.get("low_error")
        lw = m.get("low_warn")
        he = m.get("high_error")
        hw = m.get("high_warn")
        lew = AlarmSeverity.severity_for_weight(int(m.get("low_error_weight", 10)))
        lww = AlarmSeverity.severity_for_weight(int(m.get("low_warn_weight", 1)))
        hew = AlarmSeverity.severity_for_weight(int(m.get("high_error_weight", 1)))
        hww = AlarmSeverity.severity_for_weight(int(m.get("high_warn_weight", 10)))
        threshold_profile = None
        if m.get("threshold_profile"):
            threshold_profile = ThresholdProfile.get_by_id(m.get("threshold_profile"))
        r[mt.name] = MetricConfig(
            mt,
            m.get("enable_box", True),
            m.get("enable_periodic", True),
            m.get("is_stored", True),
            m.get("window_type", "m"),
            int(m.get("window", 1)),
            m.get("window_function", "last"),
            m.get("window_config"),
            m.get("window_related", False),
            int(le) if le is not None else None,
            int(lw) if lw is not None else None,
            int(hw) if hw is not None else None,
            int(he) if he is not None else None,
            lew,
            lww,
            hww,
            hew,
            threshold_profile,
            le is not None or lw is not None or he is not None or hw is not None,
        )
    return r
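# Sketch of a single ManagedObjectProfile.metrics entry as this extended variant reads
# it. The keys mirror the m.get(...) calls above; the values are illustrative and the
# metric_type id is a placeholder, not real data.
example_profile_metric = {
    "metric_type": "<MetricType id>",
    "enable_box": True,
    "enable_periodic": True,
    "is_stored": True,
    "window_type": "m",
    "window": 1,
    "window_function": "last",
    "window_config": None,
    "window_related": False,
    "low_warn": 10,
    "high_warn": 80,
    "high_error": 95,
    "threshold_profile": None,
}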
def handler(self):
    # Decode request
    try:
        req = ujson.loads(self.request.body)
    except ValueError:
        return 400, "Cannot decode JSON"
    # Validate
    try:
        req = Request.clean(req)
    except ValueError as e:
        return 400, "Bad request: %s" % e
    # Check timestamps
    from_ts = dateutil.parser.parse(req["from"])
    to_ts = dateutil.parser.parse(req["to"])
    if to_ts < from_ts:
        return 400, "Invalid range"
    # Check time range
    delta = to_ts - from_ts
    if delta.total_seconds() > config.nbi.objectmetrics_max_interval:
        return 400, "Requested range too large"
    # Prepare data for queries
    objects = set()
    for mc in req["metrics"]:
        try:
            mo_id = int(mc["object"])
            objects.add(mo_id)
        except ValueError:
            return 400, "Invalid object id: %s" % mc["object"]
    #
    if not objects:
        return 200, []
    # Map managed object id to bi_id
    id_to_bi = {}
    profiles = {}  # object id -> profile
    for mo_id, bi_id, profile_id in ManagedObject.objects.filter(id__in=list(objects)).values_list(
        "id", "bi_id", "profile"
    ):
        id_to_bi[str(mo_id)] = bi_id
        profiles[str(mo_id)] = Profile.get_by_id(profile_id).get_profile()
    if not id_to_bi:
        return 404, "Object(s) id not found: %s" % ",".join([str(o) for o in objects])
    # Prepare queries
    scopes = {}  # table_name -> ([fields, ..], [where, ..])
    for mc in req["metrics"]:
        profile = profiles[mc["object"]]
        ifaces = tuple(sorted(profile.convert_interface_name(i) for i in mc.get("interfaces", [])))
        for mn in mc["metric_types"]:
            mt = MetricType.get_by_name(mn)
            if not mt:
                return 400, "Invalid metric_type: %s" % mn
            table = mt.scope.table_name
            q = scopes.get(table)
            if not q:
                q = (set(), set())
                scopes[table] = q
            q[0].add(mt.field_name)
            if table == S_INTERFACE:
                q[1].add((id_to_bi[mc["object"]], ifaces))
            else:
                q[1].add((id_to_bi[mc["object"]],))
    # Execute queries and collect result
    from_date = from_ts.strftime("%Y-%m-%d")
    to_date = to_ts.strftime("%Y-%m-%d")
    if from_date == to_date:
        date_q = "date = '%s'" % from_date
    else:
        date_q = "date >= '%s' AND date <= '%s'" % (from_date, to_date)
    date_q = "%s AND ts >= '%s' AND ts <= '%s'" % (
        date_q,
        from_ts.replace(tzinfo=None).isoformat(),
        to_ts.replace(tzinfo=None).isoformat(),
    )
    connect = ClickhouseClient()
    scope_data = {}
    for table in scopes:
        sdata = {}  # (managed_object.bi_id, interface, field) -> ([(ts, value), ...], path)
        scope_data[table] = sdata
        # Build SQL request
        qx = []
        for wx in scopes[table][1]:
            if len(wx) == 1 or not wx[1]:
                qx += ["(managed_object = %d)" % wx[0]]
            elif len(wx[1]) == 1:
                qx += ["(managed_object = %d AND path[4] = '%s')" % (wx[0], wx[1][0])]
            else:
                qx += [
                    "(managed_object = %d AND path[4] IN (%s))"
                    % (wx[0], ", ".join("'%s'" % x for x in wx[1]))
                ]
        fields = ["ts", "managed_object", "path"] + sorted(scopes[table][0])
        query = "SELECT %s FROM %s WHERE %s AND (%s)" % (
            ", ".join(fields),
            table,
            date_q,
            " OR ".join(qx),
        )
        # Execute
        self.logger.info("%s", query)
        try:
            data = connect.execute(query)
        except ClickhouseError as e:
            self.logger.error("SQL Error: %s", e)
            return 500, "SQL Error: %s" % e
        # Process result
        for row in data:
            d = dict(zip(fields, row))
            ts = d.pop("ts")
            mo = int(d.pop("managed_object"))
            path = self.clear_path(d.pop("path"))
            if table == S_INTERFACE:
                iface = path[3]
            else:
                iface = None
            for field in d:
                key = (mo, iface, field)
                item = (ts, d[field])
                bucket = sdata.get(key)
                if bucket:
                    xdata = bucket[0]
                    xdata += [item]
                else:
                    sdata[key] = ([item], path)
    # Format result
    result = []
    for mc in req["metrics"]:
        ifaces = tuple(sorted(mc.get("interfaces", [])))
        mo_bi_id = id_to_bi[mc["object"]]
        for mn in mc["metric_types"]:
            mt = MetricType.get_by_name(mn)
            table = mt.scope.table_name
            field = mt.field_name
            if table == S_INTERFACE:
                if_list = ifaces
            else:
                if_list = (None,)
            sdata = scope_data[table]
            for iface in if_list:
                key = (mo_bi_id, iface, field)
                mdata = sdata.get(key)
                if not mdata:
                    continue
                points, path = mdata
                # Clean data type
                points = sorted(
                    ((p[0].replace(" ", "T"), mt.clean_value(p[1])) for p in points),
                    key=operator.itemgetter(0),
                )
                #
                r = {"object": mc["object"], "metric_type": mn, "path": path, "values": points}
                if iface is not None:
                    r["interface"] = iface
                result += [r]
    # Return response
    return 200, {"from": req["from"], "to": req["to"], "metrics": result}
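# Request sketch for the objectmetrics handler above: a bounded time range plus, per
# managed object, the interfaces and metric types to fetch. Object ids are passed as
# strings (the handler uses them as dict keys); all values here are illustrative.
example_request = {
    "from": "2021-01-01T00:00:00",
    "to": "2021-01-01T01:00:00",
    "metrics": [
        {
            "object": "120",
            "interfaces": ["Gi 0/1"],
            "metric_types": ["Interface | Load | In", "Interface | Load | Out"],
        }
    ],
}
# The response mirrors the requested range and returns one series per (object, metric
# type, interface) with sorted [timestamp, value] points.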
def resolve_object_data(self, object):
    def interface_profile_has_metrics(profile):
        """
        Check interface profile has metrics
        """
        for m in profile.metrics:
            if m.enable_box or m.enable_periodic:
                return True
        return False

    port_types = []
    object_metrics = []
    lags = []
    subif = []
    # Get all interface profiles with configurable metrics
    all_ifaces = list(Interface.objects.filter(managed_object=self.object.id))
    iprof = set(i.profile for i in all_ifaces)
    # @todo: Order by priority
    profiles = [p for p in iprof if interface_profile_has_metrics(p)]
    # Create charts for configured interface metrics
    for profile in profiles:
        ifaces = [i for i in all_ifaces if i.profile == profile]
        ports = []
        for iface in sorted(ifaces, key=lambda el: split_alnum(el.name)):
            if iface.type == "SVI" and not iface.profile.allow_subinterface_metrics:
                continue
            if iface.type == "aggregated" and iface.lag_members:
                lags += [
                    {
                        "name": iface.name,
                        "ports": [i.name for i in iface.lag_members],
                        "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS)
                        or "No description",
                        "status": ["status : ".join([i.name, i.status]) for i in iface.lag_members],
                    }
                ]
                continue
            ports += [
                {
                    "name": iface.name,
                    "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                    "status": iface.status,
                }
            ]
            if iface.profile.allow_subinterface_metrics:
                subif += [
                    {
                        "name": si.name,
                        "descr": self.str_cleanup(si.description, remove_letters=TITLE_BAD_CHARS),
                    }
                    for si in SubInterface.objects.filter(interface=iface)
                ]
        if not ports:
            continue
        port_types += [{"type": profile.id, "name": profile.name, "ports": ports}]
    if self.object.object_profile.report_ping_rtt:
        object_metrics += ["rtt"]
    om = []
    for m in self.object.object_profile.metrics or []:
        mt = MetricType.get_by_id(m["metric_type"])
        if not mt or not (m.get("enable_periodic", False) or m.get("enable_box", False)):
            continue
        om += [mt.name]
    object_metrics.extend(sorted(om))
    return {
        "port_types": port_types,
        "object_metrics": object_metrics,
        "lags": lags,
        "subifaces": subif,
    }
def resolve_object_data(self, object):
    def interface_profile_has_metrics(object):
        """
        Check interface profile has metrics
        """
        ifaces = {}
        sensors = {}
        sensors_status = []
        for iface in Interface.objects.filter(managed_object=object, type="physical"):
            if iface.name in ["eth0", "st"]:
                ifaces[iface.name] = {
                    "type": iface.profile.id,
                    "name": iface.profile.name,
                    "status": iface.status,
                    "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                }
            else:
                for metric in iface.profile.metrics:
                    if (
                        metric.enable_box or metric.enable_periodic
                    ) and metric.metric_type.scope.table_name == "environment":
                        if metric.metric_type.field_name == "sensor_status":
                            sensors_status.append(iface.name)
                            continue
                        if iface.name in sensors:
                            sensors[iface.name]["metrics"] += [metric.metric_type.field_name]
                        else:
                            sensors[iface.name] = {
                                "metrics": [metric.metric_type.field_name],
                                "profile": iface.profile.name,
                                "status": iface.status,
                                "descr": self.str_cleanup(
                                    iface.description, remove_letters=TITLE_BAD_CHARS
                                ),
                            }
        return sensors_status, sensors, ifaces

    port_types = []
    sensor_types = []
    object_metrics = []
    ports = []
    selected_types = defaultdict(list)
    # Get all interface profiles with configurable metrics
    sensors_status, sensors, ifaces = interface_profile_has_metrics(self.object.id)
    # Create charts for configured sensor metrics
    for sensor in sorted(sensors.keys()):
        sensor_types += [
            {
                "name": sensor,
                "metrics": sensors[sensor].get("metrics"),
                "profile": sensors[sensor].get("profile"),
                "descr": sensors[sensor].get("descr"),
                "status": sensors[sensor].get("status"),
            }
        ]
    # Create charts for configured interface metrics
    for iface in sorted(ifaces.keys()):
        ports += [
            {
                "name": iface,
                "descr": ifaces[iface].get("descr"),
                "status": ifaces[iface].get("status"),
            }
        ]
        port_types += [
            {
                "type": ifaces[iface].get("type"),
                "name": ifaces[iface].get("name"),
                "ports": ports,
            }
        ]
    if self.object.object_profile.report_ping_rtt:
        object_metrics += ["rtt"]
    om = []
    for metrics in self.object.object_profile.metrics or []:
        mt = MetricType.get_by_id(metrics["metric_type"])
        if not mt or not (metrics.get("enable_periodic", False) or metrics.get("enable_box", False)):
            continue
        om += [mt.name]
    object_metrics.extend(sorted(om))
    return {
        "sensors_status": sensors_status,
        "sensor_types": sensor_types,
        "port_types": port_types,
        "selected_types": selected_types,
        "object_metrics": object_metrics,
    }
def get_data(self):
    intervals = (
        ("y", 31557617),  # 60 * 60 * 24 * 7 * 52
        ("w", 604800),  # 60 * 60 * 24 * 7
        ("d", 86400),  # 60 * 60 * 24
        ("h", 3600),  # 60 * 60
        ("m", 60),
        ("s", 1),
    )

    def display_time(seconds):
        result = []
        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip("s")
                result.append("{}{}".format(value, name))
        return ", ".join(result[:-1])

    def sortdict(dct):
        kys = sorted(dct.keys())
        res = OrderedDict()
        for x in kys:
            for k, v in dct.items():
                if k == x:
                    res[k] = v
        return res

    def get_container_path(self):
        # Get container path
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime
    alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            uptime = Uptime.objects.filter(object=self.object.id, stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            outage = Outage.objects.filter(object=self.object.id, stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Hostname
    hostname = ""
    did = DiscoveryID.objects.filter(object=self.object.id).first()
    if did and did.hostname:
        hostname = did.hostname
    # Links
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for _link in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        for iface in _link.interfaces:
            if iface.managed_object.id == self.object.id:
                local_interfaces += [iface]
            else:
                remote_interfaces += [iface]
                remote_objects.add(iface.managed_object)
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [
                {
                    "id": _link.id,
                    "role": role,
                    "local_interface": sorted(local_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_object": ro,
                    "remote_interface": sorted(remote_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_status": "up" if ro.get_status() else "down",
                }
            ]
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink", alnum_key(x["local_interface"][0].name)),
    )
    # Build global services summary
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces
    interfaces = []
    mo = ManagedObject.objects.filter(id=self.object.id)
    mo = mo[0]
    ifaces_metrics, last_ts = get_interface_metrics(mo)
    ifaces_metrics = ifaces_metrics[mo]
    objects_metrics, last_time = get_objects_metrics(mo)
    objects_metrics = objects_metrics.get(mo)
    # Sensors
    sensors_metrics = None
    s_metrics = None
    sensors = {}
    s_meta = []
    STATUS = {0: "OK", 1: "Alarm"}
    metric_map = {}
    if mo.get_caps().get("Sensor | Controller"):
        for mc in MetricType.objects.filter(scope=MetricScope.objects.get(name="Environment")):
            if metric_map:
                metric_map["map"].update({mc.field_name: mc.name})
            else:
                metric_map = {"table_name": mc.scope.table_name, "map": {mc.field_name: mc.name}}
        sensors_metrics, last_ts = get_interface_metrics(mo, metric_map)
        sensors_metrics = sensors_metrics[mo]
    m_tp = {}
    if mo.object_profile.metrics:
        for mt in mo.object_profile.metrics:
            if mt.get("threshold_profile"):
                threshold_profile = ThresholdProfile.get_by_id(mt.get("threshold_profile"))
                m_tp[MetricType.get_by_id(mt.get("metric_type")).name] = threshold_profile
    data = {}
    meta = []
    metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))
    if objects_metrics:
        for path, mres in objects_metrics.items():
            t_v = False
            for key in mres:
                m_path = path if any(path.split("|")) else key
                m_path = " | ".join(kk.strip() for kk in m_path.split("|"))
                if m_tp.get(key):
                    t_v = self.get_threshold_config(m_tp.get(key), int(mres[key]))
                val = {
                    "name": m_path,
                    "type": "" if m_path == "Object | SysUptime" else metric_type_name[key],
                    "value": display_time(int(mres[key]))
                    if m_path == "Object | SysUptime"
                    else mres[key],
                    "threshold": t_v,
                }
                if data.get(key):
                    data[key] += [val]
                else:
                    data[key] = [val]
    data = sortdict(data)
    for k, d in data.items():
        collapsed = False
        if len(d) == 1:
            collapsed = True
        for dd in d:
            isdanger = False
            if dd["threshold"]:
                isdanger = True
                collapsed = True
        meta.append({"name": k, "value": d, "collapsed": collapsed, "isdanger": isdanger})
    for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in iface_metrics.items():
                metric_type = metric_type_name.get(key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Load | Out":
                    load_out = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [
            {
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }
        ]
        if sensors_metrics:
            s_metrics = sensors_metrics.get(str(i.name))
            if s_metrics:
                sens_metrics = []
                for i_metrics in i.profile.metrics:
                    sens_metrics.append(i_metrics.metric_type.name)
                for key, value in s_metrics.items():
                    if key not in sens_metrics:
                        continue
                    val = {
                        "name": key,
                        "type": metric_type_name[key],
                        "value": STATUS.get(value) if metric_type_name[key] == " " else value,
                        "threshold": None,
                    }
                    if sensors.get(i.name):
                        sensors[i.name] += [val]
                    else:
                        sensors[i.name] = [val]
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
    if sensors:
        sensors = sortdict(sensors)
        for k, d in sensors.items():
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
            s_meta.append({"name": k, "value": d, "isdanger": isdanger})
    interfaces = sorted(interfaces, key=lambda x: alnum_key(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }
        ]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }
        ]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [
            {
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }
        ]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance
    maintenance = []
    for m in Maintenance.objects.filter(
        affected_objects__object=self.object.id,
        is_completed=False,
        start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [
            {
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now,
            }
        ]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,  # self.object.platform.name if self.object.platform else "Unknown"
        "version": version,  # self.object.version.version if self.object.version else ""
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "hostname": hostname,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "sensors": s_meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(ManagedObjectAttribute.objects.filter(managed_object=self.object.id)),
        "confdb": None,
    }
    try:
        r["confdb"] = self.object.get_confdb()
    except (SyntaxError, ValueError):
        pass
    return r
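# Assumed shape of the metric helpers used above, inferred from how get_data() indexes
# their results (not confirmed against their implementation): get_interface_metrics()
# and get_objects_metrics() return per-object mappings of the latest values, keyed by
# interface/path and then by metric type name. "mo" stands in for the ManagedObject key.
example_ifaces_metrics = {
    "mo": {"Gi 0/1": {"Interface | Load | In": 1250000, "Interface | Errors | In": 0}},
}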
def resolve_object_data(self, object):
    def interface_profile_has_metrics(profile):
        """
        Check interface profile has metrics
        """
        for m in profile.metrics:
            if m.enable_box or m.enable_periodic:
                return True
        return False

    def interface_radio_metrics(profile):
        """
        Check interface profile has radio metrics
        """
        metrics = []
        for m in profile.metrics:
            if m.metric_type.name.startswith("Radio"):
                metrics.append(m.metric_type.field_name)
        if metrics:
            return metrics
        return None

    def interface_dom_metrics(profile):
        """
        Check interface profile has DOM metrics
        """
        metrics = []
        for m in profile.metrics:
            if m.metric_type.name.startswith("Interface | DOM"):
                metrics.append(m.metric_type.field_name)
        if metrics:
            return metrics
        return None

    def check_metrics(metric):
        """
        Object check metrics
        """
        if metric.name.startswith("Check"):
            return True
        return False

    port_types = []
    object_metrics = []
    object_check_metrics = []
    lags = []
    subif = []
    radio_types = []
    dom_types = []
    selected_types = defaultdict(list)
    selected_ifaces = set(self.extra_vars.get("var_ifaces", "").split(","))
    # Get all interface profiles with configurable metrics
    all_ifaces = list(Interface.objects.filter(managed_object=self.object.id))
    iprof = set(i.profile for i in all_ifaces)
    # @todo: Order by priority
    profiles = [p for p in iprof if interface_profile_has_metrics(p)]
    # Create charts for configured interface metrics
    for profile in profiles:
        ifaces = [i for i in all_ifaces if i.profile == profile]
        ports = []
        radio = []
        dom = []
        for iface in sorted(ifaces, key=lambda el: alnum_key(el.name)):
            if iface.type == "SVI" and not iface.profile.allow_subinterface_metrics:
                continue
            if iface.type == "aggregated" and iface.lag_members:
                lags += [
                    {
                        "name": iface.name,
                        "ports": [i.name for i in iface.lag_members],
                        "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS)
                        or "No description",
                        "status": [
                            ", Status : ".join([i.name, i.status]) for i in iface.lag_members
                        ],
                    }
                ]
                continue
            if interface_radio_metrics(profile):
                radio += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                        "status": iface.status,
                        "metrics": interface_radio_metrics(profile),
                    }
                ]
            if interface_dom_metrics(profile) and iface.type == "physical":
                dom += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                        "status": iface.status,
                        "metrics": interface_dom_metrics(profile),
                        "type": profile.id,
                        "profile_name": profile.name,
                    }
                ]
            if iface.type == "physical":
                ports += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                        "status": iface.status,
                    }
                ]
            if iface.profile.allow_subinterface_metrics:
                subif += [
                    {
                        "name": si.name,
                        "descr": self.str_cleanup(si.description, remove_letters=TITLE_BAD_CHARS),
                    }
                    for si in SubInterface.objects.filter(interface=iface)
                ]
            if iface.name in selected_ifaces:
                selected_types[profile.id] += [iface.name]
        if ports:
            port_types += [{"type": profile.id, "name": profile.name, "ports": ports}]
        if radio:
            radio_types += [{"type": profile.id, "name": profile.name, "ports": radio}]
        if dom:
            dom_types += dom
    if self.object.object_profile.report_ping_rtt:
        object_metrics += ["rtt"]
    om = []
    ocm = []
    for m in self.object.object_profile.metrics or []:
        mt = MetricType.get_by_id(m["metric_type"])
        if not mt or not (m.get("enable_periodic", False) or m.get("enable_box", False)):
            continue
        if check_metrics(mt):
            ocm += [{"name": mt.name, "metric": mt.field_name}]
            continue
        om += [mt.name]
    object_metrics.extend(sorted(om))
    object_check_metrics.extend(sorted(ocm, key=operator.itemgetter("name")))
    return {
        "port_types": port_types,
        "selected_types": selected_types,
        "object_metrics": object_metrics,
        "object_check_metrics": object_check_metrics,
        "lags": lags,
        "subifaces": subif,
        "radio_types": radio_types,
        "dom_types": sorted(dom_types, key=lambda x: alnum_key(x["name"])),
    }
def resolve_object_data(self, object):
    def interface_profile_has_metrics(object):
        """
        Check interface profile has metrics
        """
        ifaces = {}
        channels = []
        groups = []
        for iface in Interface.objects.filter(managed_object=object, type="physical"):
            if "gbe" in iface.name.lower():
                ifaces[iface.name] = {
                    "type": iface.profile.id,
                    "name": iface.profile.name,
                    "status": iface.status,
                    "descr": self.str_cleanup(iface.description, remove_letters=TITLE_BAD_CHARS),
                }
                continue
            for metric in iface.profile.metrics:
                if metric.enable_box or metric.enable_periodic:
                    if is_ipv4(iface.name.split("/")[1]):
                        groups.append(iface.name)
                    else:
                        channels.append(iface.name)
        return ifaces, channels, groups

    port_types = []
    object_metrics = []
    ports = []
    # Get all interface profiles with configurable metrics
    ifaces, channels, groups = interface_profile_has_metrics(self.object.id)
    # Create charts for configured interface metrics
    for iface in sorted(ifaces.keys()):
        ports += [
            {
                "name": iface,
                "descr": ifaces[iface].get("descr"),
                "status": ifaces[iface].get("status"),
            }
        ]
        port_types += [
            {
                "type": ifaces[iface].get("type"),
                "name": ifaces[iface].get("name"),
                "ports": ports,
            }
        ]
    if self.object.object_profile.report_ping_rtt:
        object_metrics += ["rtt"]
    om = []
    for metrics in self.object.object_profile.metrics or []:
        mt = MetricType.get_by_id(metrics["metric_type"])
        if not mt or not (metrics.get("enable_periodic", False) or metrics.get("enable_box", False)):
            continue
        om += [mt.name]
    object_metrics.extend(sorted(om))
    if self.extra_template and self.extra_vars:
        self.template = "dash_multicast.j2"
    return {
        "channels": set(channels),
        "groups": set(groups),
        "port_types": port_types,
        "object_metrics": object_metrics,
    }