def get_alarms(self, mo, f, t, annotation):
    r = []
    for ac in (ActiveAlarm, ArchivedAlarm):
        q = {"managed_object": mo.id}
        if ac.status == "A":
            q["timestamp"] = {"$gte": f, "$lte": t}
        else:
            q["$or"] = [
                {"timestamp": {"$gte": f, "$lte": t}},
                {"clear_timestamp": {"$gte": f, "$lte": t}},
            ]
        c = ac._get_collection()
        for d in c.find(
            q,
            {
                "_id": 1,
                "managed_object": 1,
                "alarm_class": 1,
                "timestamp": 1,
                "clear_timestamp": 1,
            },
        ):
            if f <= d["timestamp"] <= t:
                r += [{
                    "annotation": annotation,
                    "time": mktime(d["timestamp"].timetuple()) * 1000
                            + d["timestamp"].microsecond / 1000,
                    "title": AlarmClass.get_by_id(d["alarm_class"]).name
                    # "tags": X,
                    # "text": X
                }]
            if "clear_timestamp" in d and f <= d["clear_timestamp"] <= t:
                # Clear event: anchor the annotation at the clear time
                r += [{
                    "annotation": annotation,
                    "time": mktime(d["clear_timestamp"].timetuple()) * 1000
                            + d["clear_timestamp"].microsecond / 1000,
                    "title": "[CLEAR] %s" % AlarmClass.get_by_id(d["alarm_class"]).name
                    # "tags": X,
                    # "text": X
                }]
    return r
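The records built above are flat dicts with an epoch-millisecond time and the alarm class name as title; a sketch of one record's shape (values illustrative, not taken from real data):

# Illustrative shape of a single record returned by get_alarms();
# "time" is epoch milliseconds: mktime(ts.timetuple()) * 1000 + ts.microsecond / 1000.
example_record = {
    "annotation": "alarm-annotation",  # whatever annotation object/name the caller passed in
    "time": 1577836800000.0,
    "title": "[CLEAR] NOC | Managed Object | Ping Failed",  # class name; "[CLEAR] " prefix on clear events
}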
def filter_alarmclass(cls, *args):
    ids = [
        str(AlarmClass.get_by_name(a).id)
        for a in args
        if AlarmClass.get_by_name(a)
    ]
    if len(ids) == 1:
        return {"%s.alarmclass" % cls.F_META: ids[0]}
    else:
        return {"%s.alarmclass" % cls.F_META: {"$in": ids}}
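For reference, a self-contained illustration of the filter shapes this helper produces; the "min" prefix stands in for whatever cls.F_META holds, and names that do not resolve via AlarmClass.get_by_name() are silently dropped:

# Pure-Python sketch of the resulting Mongo filters (no NOC imports needed):
def _shape(meta_prefix, ids):
    if len(ids) == 1:
        return {"%s.alarmclass" % meta_prefix: ids[0]}
    return {"%s.alarmclass" % meta_prefix: {"$in": ids}}

assert _shape("min", ["a1"]) == {"min.alarmclass": "a1"}
assert _shape("min", ["a1", "a2"]) == {"min.alarmclass": {"$in": ["a1", "a2"]}}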
def update_alarms(self):
    from noc.fm.models.alarmseverity import AlarmSeverity
    from noc.fm.models.alarmclass import AlarmClass

    prev_status = self.context.get("umbrella_settings", False)
    current_status = self.can_update_alarms()
    self.context["umbrella_settings"] = current_status
    if not prev_status and not current_status:
        return
    self.logger.info("Updating alarm statuses")
    umbrella_cls = AlarmClass.get_by_name(self.umbrella_cls)
    if not umbrella_cls:
        self.logger.info("No umbrella alarm class. Alarm statuses not updated")
        return
    details = []
    if current_status:
        fatal_weight = self.get_fatal_alarm_weight()
        weight = self.get_alarm_weight()
        for p in self.problems:
            if not p["alarm_class"]:
                continue
            ac = AlarmClass.get_by_name(p["alarm_class"])
            if not ac:
                self.logger.info("Unknown alarm class %s. Skipping", p["alarm_class"])
                continue
            details += [{
                "alarm_class": ac,
                "path": p["path"],
                "severity": AlarmSeverity.severity_for_weight(
                    fatal_weight if p["fatal"] else weight
                ),
                "vars": {
                    "path": p["path"],
                    "message": p["message"]
                }
            }]
    else:
        # Clean up all open alarms, as they have been disabled
        details = []
    self.update_umbrella(umbrella_cls, details)
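Each entry appended to details above follows the umbrella-detail shape consumed by update_umbrella(); a sketch of one entry with illustrative values:

# Illustrative umbrella detail record (values are examples, not real data):
detail = {
    "alarm_class": ac,                                    # AlarmClass instance, already resolved
    "path": "interface | Gi 0/1",                         # problem path from self.problems
    "severity": AlarmSeverity.severity_for_weight(10),    # fatal_weight or weight
    "vars": {"path": "interface | Gi 0/1", "message": "Check failed"},
}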
def get_umbrella_alarm_cfg(self, alarm_class=None, path=None,
                           message=None, fatal=False, **kwargs):
    """
    Build Umbrella Alarm Config

    :param alarm_class: Alarm class instance or name
    :param path: Additional path
    :param fatal: True if problem is fatal and all following checks must be disabled
    :param message: Text message
    :param kwargs: Dict containing optional variables
    :return: Alarm config dict
    """
    alarm_cfg = {
        "alarm_class": AlarmClass.get_by_name(alarm_class),
        "path": " | ".join(path),
        "vars": kwargs,
    }
    alarm_cfg["vars"]["message"] = message
    alarm_cfg["vars"]["path"] = path
    return alarm_cfg
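A hedged usage sketch: path is expected to be an iterable of components, joined with " | " for the top-level path key while the original value is also kept in vars["path"]; note that fatal is accepted but not copied into the returned config here. Argument values below are illustrative.

# Hypothetical call:
cfg = self.get_umbrella_alarm_cfg(
    alarm_class="Config | Policy Violations",
    path=["config", "rule-1"],
    message="Policy rule violated",
)
# cfg == {
#     "alarm_class": <AlarmClass: Config | Policy Violations>,
#     "path": "config | rule-1",
#     "vars": {"message": "Policy rule violated", "path": ["config", "rule-1"]},
# }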
def get_ac_cm_violations():
    return AlarmClass.get_by_name("Config | Policy Violations")
def extract(self, *args, **options):
    nr = 0
    # Get reboots
    r = Reboot._get_collection().aggregate([
        {
            "$match": {
                "ts": {
                    "$gt": self.start - self.reboot_interval,
                    "$lte": self.stop
                }
            }
        },
        {"$sort": {"ts": 1}},
        {
            "$group": {
                "_id": "$object",
                "reboots": {"$push": "$ts"}
            }
        },
    ])
    # object -> [ts1, .., tsN]
    reboots = {d["_id"]: d["reboots"] for d in r}
    #
    for d in self.iter_data():
        mo = ManagedObject.get_by_id(d["managed_object"])
        if not mo:
            continue
        # Process reboot data
        o_reboots = reboots.get(d["managed_object"], [])
        n_reboots = hits_in_range(
            o_reboots,
            d["timestamp"] - self.reboot_interval,
            d["clear_timestamp"]
        )
        #
        self.alarm_stream.push(
            ts=d["timestamp"],
            close_ts=d["clear_timestamp"],
            duration=max(0, int((d["clear_timestamp"] - d["timestamp"]).total_seconds())),
            alarm_id=str(d["_id"]),
            root=str(d.get("root") or ""),
            rca_type=d.get("rca_type") or 0,
            alarm_class=AlarmClass.get_by_id(d["alarm_class"]),
            severity=d["severity"],
            reopens=d.get("reopens") or 0,
            direct_services=sum(ss["summary"] for ss in d.get("direct_services", [])),
            direct_subscribers=sum(ss["summary"] for ss in d.get("direct_subscribers", [])),
            total_objects=sum(ss["summary"] for ss in d.get("total_objects", [])),
            total_services=sum(ss["summary"] for ss in d.get("total_services", [])),
            total_subscribers=sum(ss["summary"] for ss in d.get("total_subscribers", [])),
            escalation_ts=d.get("escalation_ts"),
            escalation_tt=d.get("escalation_tt"),
            managed_object=mo,
            pool=mo.pool,
            ip=mo.address,
            profile=mo.profile,
            object_profile=mo.object_profile,
            vendor=mo.vendor,
            platform=mo.platform,
            version=mo.version,
            administrative_domain=mo.administrative_domain,
            segment=mo.segment,
            container=mo.container,
            x=mo.x,
            y=mo.y,
            reboots=n_reboots,
            services=[{
                "profile": ServiceProfile.get_by_id(ss["profile"]).bi_id,
                "summary": ss["summary"],
            } for ss in d.get("direct_services", [])],
            subscribers=[{
                "profile": SubscriberProfile.get_by_id(ss["profile"]).bi_id,
                "summary": ss["summary"],
            } for ss in d.get("direct_subscribers", [])],
            # location=mo.container.get_address_text()
            ack_user=d.get("ack_user", ""),
            ack_ts=d.get("ack_ts"),
        )
        nr += 1
        self.last_ts = d["clear_timestamp"]
    self.alarm_stream.finish()
    return nr
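hits_in_range() is imported from elsewhere in the codebase; a minimal sketch of the semantics assumed above (count of sorted timestamps falling within [start, stop]), written with bisect:

# Minimal sketch only -- the real helper lives elsewhere and may differ in detail.
from bisect import bisect_left, bisect_right

def hits_in_range(timestamps, start, stop):
    # timestamps must be sorted ascending; the $sort stage in the aggregation above
    # guarantees that for the per-object reboot lists.
    return bisect_right(timestamps, stop) - bisect_left(timestamps, start)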
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    administrative_domain=None,
    columns=None,
    source="both",
    alarm_class=None,
    enable_autowidth=False,
):
    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, str):
                return smart_text(v)
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return smart_text(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = [
        "id",
        "alarm_class",
        "alarm_from_ts",
        "alarm_to_ts",
        "alarm_tt",
        "object_name",
        "object_address",
        "object_admdomain",
        "log_timestamp",
        "log_source",
        "log_message",
        # "tt",
        # "escalation_ts",
    ]

    header_row = [
        "ID",
        _("ALARM_CLASS"),
        _("ALARM_FROM_TS"),
        _("ALARM_TO_TS"),
        _("ALARM_TT"),
        _("OBJECT_NAME"),
        _("OBJECT_ADDRESS"),
        _("OBJECT_ADMDOMAIN"),
        _("LOG_TIMESTAMP"),
        _("LOG_SOURCE"),
        _("LOG_MESSAGE"),
    ]

    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))

    r = [translate_row(header_row, cmap)]
    fd = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd
        }
    }

    mos = ManagedObject.objects.filter()
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(administrative_domain[0])
    if not request.user.is_superuser:
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                ads = list(set(ads) & set(user_ads))
                if not ads:
                    return HttpResponse(
                        "<html><body>Permission denied: Invalid Administrative Domain</html></body>"
                    )
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    # Working if Administrative domain set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}  # @todo More than 2 levels of hierarchy
        except bson.errors.InvalidId:
            pass

    addr_map = {
        mo[0]: (mo[1], mo[2])
        for mo in mos.values_list("id", "name", "address")
    }

    # Active Alarms
    coll = ActiveAlarm._get_collection()
    for aa in coll.aggregate([
        {"$match": match},
        {"$unwind": "$log"},
        {"$match": {"log.source": {"$exists": True, "$ne": None}}},
        {
            "$project": {
                "timestamp": 1,
                "managed_object": 1,
                "alarm_class": 1,
                "escalation_tt": 1,
                "adm_path": 1,
                "log": 1,
            }
        },
        {"$sort": {"_id": 1, "log.timestamp": 1}},
    ]):
        r += [
            translate_row(
                row([
                    smart_text(aa["_id"]),
                    AlarmClass.get_by_id(aa["alarm_class"]).name,
                    aa["timestamp"],
                    "",
                    aa.get("escalation_tt", ""),
                    addr_map[aa["managed_object"]][0],
                    addr_map[aa["managed_object"]][1],
                    AdministrativeDomain.get_by_id(aa["adm_path"][-1]).name,
                    aa["log"]["timestamp"],
                    aa["log"]["source"],
                    aa["log"]["message"],
                ]),
                cmap,
            )
        ]
    # Archived Alarms
    coll = ArchivedAlarm._get_collection()
    for aa in coll.aggregate([
        {"$match": match},
        {"$unwind": "$log"},
        {"$match": {"log.source": {"$exists": True}}},
        {
            "$project": {
                "timestamp": 1,
                "clear_timestamp": 1,
                "managed_object": 1,
                "alarm_class": 1,
                "escalation_tt": 1,
                "adm_path": 1,
                "log": 1,
            }
        },
        {"$sort": {"_id": 1, "log.timestamp": 1}},
    ]):
        r += [
            translate_row(
                row([
                    smart_text(aa["_id"]),
                    AlarmClass.get_by_id(aa["alarm_class"]).name,
                    aa["timestamp"],
                    aa["clear_timestamp"],
                    aa.get("escalation_tt", ""),
                    addr_map[aa["managed_object"]][0],
                    addr_map[aa["managed_object"]][1],
                    AdministrativeDomain.get_by_id(aa["adm_path"][-1]).name,
                    aa["log"]["timestamp"],
                    aa["log"]["source"],
                    aa["log"]["message"],
                ]),
                cmap,
            )
        ]

    filename = "alarm_comments.csv"
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s"' % filename
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerow(columns)
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr(filename, f.read())
            zf.filename = "%s.zip" % filename
        # response = HttpResponse(content_type="text/csv")
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (r[0][cn] not in max_column_data_length
                           or len(str(c)) > max_column_data_length[r[0][cn]]):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(),
                                content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="alarm_comments.xlsx"'
        response.close()
        return response
def get_ac_pm_thresholds():
    return AlarmClass.get_by_name("NOC | PM | Out of Thresholds")
def api_report( self, request, from_date, to_date, o_format, min_duration=0, max_duration=0, min_objects=0, min_subscribers=0, segment=None, administrative_domain=None, selector=None, ex_selector=None, columns=None, source="both", alarm_class=None, subscribers=None, enable_autowidth=False, ): def row(row, container_path, segment_path): def qe(v): if v is None: return "" if isinstance(v, unicode): return v.encode("utf-8") elif isinstance(v, datetime.datetime): return v.strftime("%Y-%m-%d %H:%M:%S") elif not isinstance(v, str): return str(v) else: return v r = [qe(x) for x in row] if len(container_path) < self.CONTAINER_PATH_DEPTH: container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path)) else: container_path = container_path[:self.CONTAINER_PATH_DEPTH] if len(segment_path) < self.SEGMENT_PATH_DEPTH: segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path)) else: segment_path = segment_path[:self.SEGMENT_PATH_DEPTH] return r + container_path + segment_path def translate_row(row, cmap): return [row[i] for i in cmap] cols = ([ "id", "root_id", "from_ts", "to_ts", "duration_sec", "object_name", "object_address", "object_hostname", "object_profile", "object_admdomain", "object_platform", "object_version", "alarm_class", "alarm_subject", "maintenance", "objects", "subscribers", "tt", "escalation_ts", "location", "container_address", ] + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] + ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]) header_row = ( [ "ID", _("ROOT_ID"), _("FROM_TS"), _("TO_TS"), _("DURATION_SEC"), _("OBJECT_NAME"), _("OBJECT_ADDRESS"), _("OBJECT_HOSTNAME"), _("OBJECT_PROFILE"), _("OBJECT_ADMDOMAIN"), _("OBJECT_PLATFORM"), _("OBJECT_VERSION"), _("ALARM_CLASS"), _("ALARM_SUBJECT"), _("MAINTENANCE"), _("OBJECTS"), _("SUBSCRIBERS"), _("TT"), _("ESCALATION_TS"), _("LOCATION"), _("CONTAINER_ADDRESS"), ] + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] + ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]) if columns: cmap = [] for c in columns.split(","): try: cmap += [cols.index(c)] except ValueError: continue else: cmap = list(range(len(cols))) subscribers_profile = self.default_subscribers_profile if subscribers: subscribers_profile = set( SubscriberProfile.objects.filter( id__in=subscribers.split(",")).scalar("id")) r = [translate_row(header_row, cmap)] fd = datetime.datetime.strptime( to_date, "%d.%m.%Y") + datetime.timedelta(days=1) match = { "timestamp": { "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"), "$lte": fd } } match_duration = {"duration": {"$gte": min_duration}} if max_duration: match_duration = { "duration": { "$gte": min_duration, "$lte": max_duration } } mos = ManagedObject.objects.filter(is_managed=True) if segment: try: match["segment_path"] = bson.ObjectId(segment) except bson.errors.InvalidId: pass ads = [] if administrative_domain: if administrative_domain.isdigit(): administrative_domain = [int(administrative_domain)] ads = AdministrativeDomain.get_nested_ids( administrative_domain[0]) if not request.user.is_superuser: user_ads = UserAccess.get_domains(request.user) if administrative_domain and ads: if administrative_domain[0] not in user_ads: ads = list(set(ads) & set(user_ads)) else: ads = administrative_domain else: ads = user_ads if ads: mos = mos.filter(administrative_domain__in=ads) if selector: selector = ManagedObjectSelector.get_by_id(int(selector)) mos = mos.filter(selector.Q) if ex_selector: ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector)) mos = 
mos.exclude(ex_selector.Q) # Working if Administrative domain set if ads: try: match["adm_path"] = {"$in": ads} # @todo More 2 level hierarhy except bson.errors.InvalidId: pass mos_id = list(mos.order_by("id").values_list("id", flat=True)) mo_hostname = {} maintenance = [] if mos_id and (selector or ex_selector): match["managed_object"] = {"$in": mos_id} if "maintenance" in columns.split(","): maintenance = Maintenance.currently_affected() if "object_hostname" in columns.split(","): mo_hostname = ReportObjectsHostname1(sync_ids=mos_id) mo_hostname = mo_hostname.get_dictionary() moss = ReportAlarmObjects(mos_id).get_all() # container_lookup = ReportContainer(mos_id) container_lookup = None subject = "alarm_subject" in columns loc = AlarmApplication([]) if source in ["archive", "both"]: # Archived Alarms for a in (ArchivedAlarm._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED ).aggregate([ { "$match": match }, { "$addFields": { "duration": { "$divide": [ { "$subtract": ["$clear_timestamp", "$timestamp"] }, 1000, ] } } }, { "$match": match_duration }, # {"$sort": {"timestamp": 1}} ])): if int(a["managed_object"]) not in moss: continue dt = a["clear_timestamp"] - a["timestamp"] duration = int(dt.total_seconds()) total_objects = sum(ss["summary"] for ss in a["total_objects"]) if min_objects and total_objects < min_objects: continue total_subscribers = sum( ss["summary"] for ss in a["total_subscribers"] if subscribers_profile and ss["profile"] in subscribers_profile) if min_subscribers and total_subscribers < min_subscribers: continue if "segment_" in columns.split( ",") or "container_" in columns.split(","): path = ObjectPath.get_path(a["managed_object"]) if path: segment_path = [ NetworkSegment.get_by_id(s).name for s in path.segment_path if NetworkSegment.get_by_id(s) ] container_path = [ Object.get_by_id(s).name for s in path.container_path if Object.get_by_id(s) ] else: segment_path = [] container_path = [] else: segment_path = [] container_path = [] r += [ translate_row( row( [ str(a["_id"]), str(a["root"]) if a.get("root") else "", a["timestamp"], a["clear_timestamp"], str(duration), moss[a["managed_object"]][0], moss[a["managed_object"]][1], mo_hostname.get(a["managed_object"], ""), Profile.get_by_id( moss[a["managed_object"]][3]).name if moss[a["managed_object"]][5] else "", moss[a["managed_object"]][6], Platform.get_by_id( moss[a["managed_object"]][9]) if moss[a["managed_object"]][9] else "", Firmware.get_by_id( moss[a["managed_object"]][10]) if moss[a["managed_object"]][10] else "", AlarmClass.get_by_id(a["alarm_class"]).name, ArchivedAlarm.objects.get( id=a["_id"]).subject if subject else "", "", total_objects, total_subscribers, a.get("escalation_tt"), a.get("escalation_ts"), ", ".join(l for l in ( loc.location(moss[a["managed_object"]][5] ) if moss[a["managed_object"]] [5] is not None else "") if l), container_lookup[a["managed_object"]].get( "text", "") if container_lookup else "", ], container_path, segment_path, ), cmap, ) ] # Active Alarms if source in ["active", "both"]: for a in (ActiveAlarm._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED). 
aggregate([ { "$match": match }, { "$addFields": { "duration": { "$divide": [{ "$subtract": [fd, "$timestamp"] }, 1000] } } }, { "$match": match_duration }, # {"$sort": {"timestamp": 1}} ])): dt = fd - a["timestamp"] duration = int(dt.total_seconds()) total_objects = sum(ss["summary"] for ss in a["total_objects"]) if min_objects and total_objects < min_objects: continue total_subscribers = sum( ss["summary"] for ss in a["total_subscribers"] if subscribers_profile and ss["profile"] in subscribers_profile) if min_subscribers and total_subscribers < min_subscribers: continue if "segment_" in columns.split( ",") or "container_" in columns.split(","): path = ObjectPath.get_path(a["managed_object"]) if path: segment_path = [ NetworkSegment.get_by_id(s).name for s in path.segment_path if NetworkSegment.get_by_id(s) ] container_path = [ Object.get_by_id(s).name for s in path.container_path if Object.get_by_id(s) ] else: segment_path = [] container_path = [] else: segment_path = [] container_path = [] r += [ translate_row( row( [ str(a["_id"]), str(a["root"]) if a.get("root") else "", a["timestamp"], # a["clear_timestamp"], "", str(duration), moss[a["managed_object"]][0], moss[a["managed_object"]][1], mo_hostname.get(a["managed_object"], ""), Profile.get_by_id(moss[a["managed_object"]][3]) if moss[a["managed_object"]][5] else "", moss[a["managed_object"]][6], Platform.get_by_id( moss[a["managed_object"]][9]) if moss[a["managed_object"]][9] else "", Firmware.get_by_id( moss[a["managed_object"]][10]) if moss[a["managed_object"]][10] else "", AlarmClass.get_by_id(a["alarm_class"]).name, ActiveAlarm.objects.get( id=a["_id"]).subject if subject else None, "Yes" if a["managed_object"] in maintenance else "No", total_objects, total_subscribers, a.get("escalation_tt"), a.get("escalation_ts"), ", ".join(l for l in ( loc.location(moss[a["managed_object"]][5] ) if moss[a["managed_object"]] [5] is not None else "") if l), container_lookup[a["managed_object"]].get( "text", "") if container_lookup else "", ], container_path, segment_path, ), cmap, ) ] if o_format == "csv": response = HttpResponse(content_type="text/csv") response[ "Content-Disposition"] = 'attachment; filename="alarms.csv"' writer = csv.writer(response) writer.writerows(r) return response elif o_format == "xlsx": response = StringIO() wb = xlsxwriter.Workbook(response) cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1}) ws = wb.add_worksheet("Alarms") max_column_data_length = {} for rn, x in enumerate(r): for cn, c in enumerate(x): if rn and (r[0][cn] not in max_column_data_length or len(str(c)) > max_column_data_length[r[0][cn]]): max_column_data_length[r[0][cn]] = len(str(c)) ws.write(rn, cn, c, cf1) ws.autofilter(0, 0, rn, cn) ws.freeze_panes(1, 0) for cn, c in enumerate(r[0]): # Set column width width = get_column_width(c) if enable_autowidth and width < max_column_data_length[c]: width = max_column_data_length[c] ws.set_column(cn, cn, width=width) wb.close() response.seek(0) response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel") response[ "Content-Disposition"] = 'attachment; filename="alarms.xlsx"' response.close() return response
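Both report variants share the cols/cmap/translate_row pattern for honoring the caller's column selection; a compact, self-contained sketch of that mapping:

# Illustration of the column-mapping used by translate_row()/cmap above:
cols = ["id", "from_ts", "to_ts", "object_name"]           # canonical column order
requested = "object_name,id"                               # user-supplied list (illustrative)
cmap = [cols.index(c) for c in requested.split(",") if c in cols]
row = ["42", "2020-01-01", "2020-01-02", "sw-core-01"]
print([row[i] for i in cmap])                               # ['sw-core-01', '42']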
class MetricsCheck(DiscoveryCheck): """ MAC discovery """ name = "metrics" required_script = "get_metrics" _object_profile_metrics = cachetools.TTLCache(1000, 60) _interface_profile_metrics = cachetools.TTLCache(1000, 60) _slaprofile_metrics = cachetools.TTLCache(1000, 60) S_OK = 0 S_WARN = 1 S_ERROR = 2 SMAP = { 0: "ok", 1: "warn", 2: "error" } SEV_MAP = { 1: 2000, 2: 3000 } AC_PM_THRESHOLDS = AlarmClass.get_by_name("NOC | PM | Out of Thresholds") AC_PM_LOW_ERROR = AlarmClass.get_by_name("NOC | PM | Low Error") AC_PM_HIGH_ERROR = AlarmClass.get_by_name("NOC | PM | High Error") AC_PM_LOW_WARN = AlarmClass.get_by_name("NOC | PM | Low Warning") AC_PM_HIGH_WARN = AlarmClass.get_by_name("NOC | PM | High Warning") SLA_CAPS = [ "Cisco | IP | SLA | Probes" ] def __init__(self, *args, **kwargs): super(MetricsCheck, self).__init__(*args, **kwargs) self.id_count = itertools.count() self.id_metrics = {} @classmethod @cachetools.cachedmethod( operator.attrgetter("_object_profile_metrics"), lock=lambda _: metrics_lock ) def get_object_profile_metrics(cls, p_id): r = {} opr = ManagedObjectProfile.get_by_id(id=p_id) if not opr: return r for m in opr.metrics: mt_id = m.get("metric_type") if not mt_id: continue mt = MetricType.get_by_id(mt_id) if not mt: continue le = m.get("low_error") lw = m.get("low_warn") he = m.get("high_error") hw = m.get("high_warn") lew = AlarmSeverity.severity_for_weight(int(m.get("low_error_weight", 10))) lww = AlarmSeverity.severity_for_weight(int(m.get("low_warn_weight", 1))) hew = AlarmSeverity.severity_for_weight(int(m.get("high_error_weight", 1))) hww = AlarmSeverity.severity_for_weight(int(m.get("high_warn_weight", 10))) threshold_profile = None if m.get("threshold_profile"): threshold_profile = ThresholdProfile.get_by_id(m.get("threshold_profile")) r[mt.name] = MetricConfig( mt, m.get("enable_box", True), m.get("enable_periodic", True), m.get("is_stored", True), m.get("window_type", "m"), int(m.get("window", 1)), m.get("window_function", "last"), m.get("window_config"), m.get("window_related", False), int(le) if le is not None else None, int(lw) if lw is not None else None, int(hw) if hw is not None else None, int(he) if he is not None else None, lew, lww, hww, hew, threshold_profile, le is not None or lw is not None or he is not None or hw is not None ) return r @staticmethod def quote_path(path): """ Convert path list to ClickHouse format :param path: :return: """ return "[%s]" % ",".join("'%s'" % p for p in path) @staticmethod def config_from_settings(m): """ Returns MetricConfig from .metrics field :param m: :return: """ return MetricConfig( m.metric_type, m.enable_box, m.enable_periodic, m.is_stored, m.window_type, m.window, m.window_function, m.window_config, m.window_related, m.low_error, m.low_warn, m.high_warn, m.high_error, AlarmSeverity.severity_for_weight(m.low_error_weight), AlarmSeverity.severity_for_weight(m.low_warn_weight), AlarmSeverity.severity_for_weight(m.high_warn_weight), AlarmSeverity.severity_for_weight(m.high_error_weight), m.threshold_profile, m.low_error is not None or m.low_warn is not None or m.high_warn is not None or m.high_error is not None ) @classmethod @cachetools.cachedmethod( operator.attrgetter("_interface_profile_metrics"), lock=lambda _: metrics_lock ) def get_interface_profile_metrics(cls, p_id): r = {} ipr = InterfaceProfile.get_by_id(id=p_id) if not ipr: return r for m in ipr.metrics: r[m.metric_type.name] = cls.config_from_settings(m) return r @classmethod @cachetools.cachedmethod( operator.attrgetter("_slaprofile_metrics"), 
lock=lambda _: metrics_lock) def get_slaprofile_metrics(cls, p_id): r = {} spr = SLAProfile.get_by_id(p_id) if not spr: return r for m in spr.metrics: r[m.metric_type.name] = cls.config_from_settings(m) return r def get_object_metrics(self): """ Populate metrics list with objects metrics :return: """ metrics = [] o_metrics = self.get_object_profile_metrics(self.object.object_profile.id) self.logger.debug("Object metrics: %s", o_metrics) for metric in o_metrics: if ((self.is_box and not o_metrics[metric].enable_box) or (self.is_periodic and not o_metrics[metric].enable_periodic)): continue m_id = next(self.id_count) metrics += [{ "id": m_id, "metric": metric }] self.id_metrics[m_id] = o_metrics[metric] if not metrics: self.logger.info("Object metrics are not configured. Skipping") return metrics def get_subinterfaces(self): subs = defaultdict(list) # interface id -> [{"name":, "ifindex":}] for si in SubInterface._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED ).find({ "managed_object": self.object.id }, { "name": 1, "interface": 1, "ifindex": 1 }): subs[si["interface"]] += [{ "name": si["name"], "ifindex": si.get("ifindex") }] return subs def get_interface_metrics(self): """ Populate metrics list with interface metrics :return: """ subs = None metrics = [] for i in Interface._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED ).find({ "managed_object": self.object.id, "type": "physical" }, { "_id": 1, "name": 1, "ifindex": 1, "profile": 1 }): ipr = self.get_interface_profile_metrics(i["profile"]) self.logger.debug("Interface %s. ipr=%s", i["name"], ipr) if not ipr: continue # No metrics configured i_profile = InterfaceProfile.get_by_id(i["profile"]) if i_profile.allow_subinterface_metrics and subs is None: # Resolve subinterfaces subs = self.get_subinterfaces() ifindex = i.get("ifindex") for metric in ipr: if ((self.is_box and not ipr[metric].enable_box) or (self.is_periodic and not ipr[metric].enable_periodic)): continue m_id = next(self.id_count) m = { "id": m_id, "metric": metric, "path": ["", "", "", i["name"]] } if ifindex is not None: m["ifindex"] = ifindex metrics += [m] self.id_metrics[m_id] = ipr[metric] if i_profile.allow_subinterface_metrics: for si in subs[i["_id"]]: m_id = next(self.id_count) m = { "id": m_id, "metric": metric, "path": ["", "", "", i["name"], si["name"]] } if si["ifindex"] is not None: m["ifindex"] = si["ifindex"] metrics += [m] self.id_metrics[m_id] = ipr[metric] if not metrics: self.logger.info("Interface metrics are not configured. Skipping") return metrics def get_sla_metrics(self): if not self.has_any_capability(self.SLA_CAPS): self.logger.info("SLA not configured, skipping SLA metrics") metrics = [] for p in SLAProbe._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED ).find({ "managed_object": self.object.id }, { "name": 1, "group": 1, "profile": 1, "type": 1 }): if not p.get("profile"): self.logger.debug("Probe %s has no profile. Skipping", p["name"]) continue pm = self.get_slaprofile_metrics(p["profile"]) if not pm: self.logger.debug( "Probe %s has profile '%s' with no configured metrics. 
" "Skipping", p["name"], p.profile.name ) continue for metric in pm: if ((self.is_box and not pm[metric].enable_box) or (self.is_periodic and not pm[metric].enable_periodic)): continue m_id = next(self.id_count) metrics += [{ "id": m_id, "metric": metric, "path": [p.get("group", ""), p["name"]], "sla_type": p["type"] }] self.id_metrics[m_id] = pm[metric] if not metrics: self.logger.info("SLA metrics are not configured. Skipping") return metrics def process_result(self, result): """ Process IGetMetrics result :param result: :return: """ # Restore last counter state if self.has_artefact("reboot"): self.logger.info( "Resetting counter context due to detected reboot" ) self.job.context["counters"] = {} counters = self.job.context["counters"] alarms = [] data = defaultdict(dict) n_metrics = 0 mo_id = self.object.bi_id ts_cache = {} # timestamp -> (date, ts) # for m in result: path = m.path cfg = self.id_metrics.get(m.id) if m.type in MT_COUNTER_DELTA: # Counter type if path: key = "%x|%s" % ( cfg.metric_type.bi_id, "|".join(str(p) for p in path) ) else: key = "%x" % cfg.metric_type.bi_id # Restore old value and save new r = counters.get(key) counters[key] = (m.ts, m.value) if r is None: # No stored state self.logger.debug( "[%s] COUNTER value is not found. " "Storing and waiting for a new result", m.label ) continue # Calculate counter self.logger.debug( "[%s] Old value: %s@%s, new value: %s@%s.", m.label, r[1], r[0], m.value, m.ts ) if m.type == MT_COUNTER: cv = self.convert_counter(m, r) else: cv = self.convert_delta(m, r) if cv is None: # Counter stepback or other errors # Remove broken value self.logger.debug( "[%s] Counter stepback from %s@%s to %s@%s: Skipping", m.label, r[1], r[0], m.value, m.ts ) del counters[key] continue m.value = cv m.abs_value = cv * m.scale elif m.type == MT_BOOL: # Convert boolean type m.abs_value = 1 if m.value else 0 else: # Gauge m.abs_value = m.value * m.scale self.logger.debug( "[%s] Measured value: %s. Scale: %s. Resulting value: %s", m.label, m.value, m.scale, m.abs_value ) # Schedule to store if cfg.is_stored: tsc = ts_cache.get(m.ts) if not tsc: lt = time.localtime(m.ts // 1000000000) tsc = ( time.strftime("%Y-%m-%d", lt), time.strftime("%Y-%m-%d %H:%M:%S", lt) ) ts_cache[m.ts] = tsc if path: pk = "%s\t%s\t%d\t%s" % ( tsc[0], tsc[1], mo_id, self.quote_path(path) ) table = "%s.date.ts.managed_object.path" % cfg.metric_type.scope.table_name else: pk = "%s\t%s\t%d" % (tsc[0], tsc[1], mo_id) table = "%s.date.ts.managed_object" % cfg.metric_type.scope.table_name field = cfg.metric_type.field_name try: data[table, pk][field] = cfg.metric_type.clean_value(m.abs_value) except ValueError as e: self.logger.info( "[%s] Cannot clean value %s: %s", m.label, m.abs_value, e ) continue n_metrics += 1 if cfg.process_thresholds and m.abs_value is not None: alarms += self.process_thresholds(m, cfg) return n_metrics, data, alarms def handler(self): self.logger.info("Collecting metrics") # Build get_metrics input parameters metrics = self.get_object_metrics() metrics += self.get_interface_metrics() metrics += self.get_sla_metrics() if not metrics: self.logger.info("No metrics configured. 
Skipping") return # Collect metrics self.logger.debug("Collecting metrics: %s", metrics) result = [ MData(**r) for r in self.object.scripts.get_metrics(metrics=metrics) ] if not result: self.logger.info("No metrics found") return # Process results n_metrics, data, alarms = self.process_result(result) # Send metrics if n_metrics: self.logger.info("Spooling %d metrics", n_metrics) self.send_metrics(data) # Set up threshold alarms self.logger.info("%d alarms detected", len(alarms)) self.job.update_umbrella( self.AC_PM_THRESHOLDS, alarms ) def convert_delta(self, m, r): """ Calculate value from delta, gently handling overflows :param m: MData :param r: Old state (ts, value) """ if m.value < r[1]: # Counter decreased, either due wrap or stepback if r[1] <= MAX31: mc = MAX31 elif r[1] <= MAX32: mc = MAX32 else: mc = MAX64 # Direct distance d_direct = r[1] - m.value # Wrap distance d_wrap = m.value + (mc - r[1]) if d_direct < d_wrap: # Possible counter stepback # Skip value self.logger.debug( "[%s] Counter stepback: %s -> %s", m.label, r[1], m.value ) return None else: # Counter wrap self.logger.debug( "[%s] Counter wrap: %s -> %s", m.label, r[1], m.value ) return d_wrap else: return m.value - r[1] def convert_counter(self, m, r): """ Calculate value from counter, gently handling overflows :param m: MData :param r: Old state (ts, value) """ dt = (float(m.ts) - float(r[0])) / NS delta = self.convert_delta(m, r) if delta is None: return delta return float(delta) / dt def get_window_function(self, m, cfg): """ Check thresholds :param m: dict with metric result :param cfg: MetricConfig :return: Value or None """ # Build window state key if m.path: key = "%x|%s" % ( cfg.metric_type.bi_id, "|".join(str(p) for p in m.path) ) else: key = "%x" % cfg.metric_type.bi_id # states = self.job.context["metric_windows"] value = m.abs_value ts = m.ts // 1000000000 # Do not store single-value windows drop_window = cfg.window_type == "m" and cfg.window == 1 # Restore window if drop_window: window = [(ts, value)] window_full = True if key in states: del states[key] else: window = states.get(key, []) window += [(ts, value)] # Trim window according to policy if cfg.window_type == WT_MEASURES: # Leave fixed amount of measures window = window[-cfg.window:] window_full = len(window) == cfg.window elif cfg.window_type == WT_TIME: # Time-based window window_full = ts - window[0][0] >= cfg.window while ts - window[0][0] > cfg.window: window.pop(0) else: self.logger.error( "Cannot calculate thresholds for %s (%s): Invalid window type '%s'", m.metric, m.path, cfg.window_type ) return None # Store back to context states[key] = window if not window_full: self.logger.error( "Cannot calculate thresholds for %s (%s): Window is not filled", m.metric, m.path ) return None # Process window function wf = get_window_function(cfg.window_function) if not wf: self.logger.error( "Cannot calculate thresholds for %s (%s): Invalid window function %s", m.metric, m.path, cfg.window_function ) return None try: return wf(window, cfg.window_config) except ValueError as e: self.logger.error( "Cannot calculate thresholds for %s (%s): %s", m.metric, m.path, e ) return None def process_thresholds(self, m, cfg): """ Check thresholds :param m: dict with metric result :param cfg: MetricConfig :return: List of umbrella alarm details """ w_value = self.get_window_function(m, cfg) alarms = [] if w_value is None: return alarms # Check thresholds path = m.metric if m.path: path += " | ".join(m.path) alarm_cfg = None if cfg.low_error is not None and w_value <= 
cfg.low_error: alarm_cfg = { "alarm_class": self.AC_PM_LOW_ERROR, "path": path, "severity": cfg.low_error_severity, "vars": { "path": path, "metric": m.metric, "value": w_value, "threshold": cfg.low_error, "window_type": cfg.window_type, "window": cfg.window, "window_function": cfg.window_function } } elif cfg.low_warn is not None and w_value <= cfg.low_warn: alarm_cfg = { "alarm_class": self.AC_PM_LOW_WARN, "path": path, "severity": cfg.low_warn_severity, "vars": { "path": path, "metric": m.metric, "value": w_value, "threshold": cfg.low_warn, "window_type": cfg.window_type, "window": cfg.window, "window_function": cfg.window_function } } elif cfg.high_error is not None and w_value >= cfg.high_error: alarm_cfg = { "alarm_class": self.AC_PM_HIGH_ERROR, "path": path, "severity": cfg.high_error_severity, "vars": { "path": path, "metric": m.metric, "value": w_value, "threshold": cfg.high_error, "window_type": cfg.window_type, "window": cfg.window, "window_function": cfg.window_function } } elif cfg.high_warn is not None and w_value >= cfg.high_warn: alarm_cfg = { "alarm_class": self.AC_PM_HIGH_WARN, "path": path, "severity": cfg.high_warn_severity, "vars": { "path": path, "metric": m.metric, "value": w_value, "threshold": cfg.high_warn, "window_type": cfg.window_type, "window": cfg.window, "window_function": cfg.window_function } } if alarm_cfg is not None: alarms += [alarm_cfg] # Apply umbrella filter handler if cfg.threshold_profile and cfg.threshold_profile.umbrella_filter_handler: try: handler = get_handler(cfg.threshold_profile.umbrella_filter_handler) if handler: alarms = [handler(self, a) for a in alarms] # Remove filtered alarms alarms = [a for a in alarms if a] except Exception as e: self.logger.error("Exception when loading handler %s", e) return alarms def send_metrics(self, data): """ Convert collected metrics to Service.register_metric format :param data: (table fields, pk) -> field -> value :return: """ # Normalized data # fields -> records chains = defaultdict(list) # Normalize data for (fields, pk), values in six.iteritems(data): # Sorted list of fields f = sorted(values) record_fields = "%s.%s" % (fields, ".".join(f)) if isinstance(record_fields, unicode): record_fields = record_fields.encode("utf-8") record = "%s\t%s" % (pk, "\t".join(str(values[fn]) for fn in f)) if isinstance(record, unicode): record = record.encode("utf-8") chains[record_fields] += [ record ] # Spool data for f in chains: self.service.register_metrics(f, chains[f])
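A hedged sketch of an umbrella filter handler as invoked above: it receives the check instance and one umbrella alarm config dict and returns the (possibly modified) dict, or a falsy value to drop it. The handler name, metric, and threshold below are illustrative; the handler is looked up by the path configured on the ThresholdProfile.

# Illustrative handler only:
def suppress_low_value_alarms(check, alarm):
    # "alarm" is one entry produced by process_thresholds() above
    if alarm["vars"]["metric"] == "Interface | Errors | In" and alarm["vars"]["value"] < 10:
        return None  # filtered out: process_thresholds() drops falsy results
    return alarm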
def api_report( self, request, from_date, to_date, o_format, min_duration=0, max_duration=0, min_objects=0, min_subscribers=0, segment=None, administrative_domain=None, selector=None, ex_selector=None, columns=None, source="both", alarm_class=None, subscribers=None, enable_autowidth=False, ): def row(row, container_path, segment_path): def qe(v): if v is None: return "" if isinstance(v, str): return smart_text(v) elif isinstance(v, datetime.datetime): return v.strftime("%Y-%m-%d %H:%M:%S") elif not isinstance(v, str): return smart_text(v) else: return v r = [qe(x) for x in row] if len(container_path) < self.CONTAINER_PATH_DEPTH: container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path)) else: container_path = container_path[:self.CONTAINER_PATH_DEPTH] if len(segment_path) < self.SEGMENT_PATH_DEPTH: segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path)) else: segment_path = segment_path[:self.SEGMENT_PATH_DEPTH] return r + container_path + segment_path def translate_row(row, cmap): return [row[i] for i in cmap] cols = ([ "id", "root_id", "from_ts", "to_ts", "duration_sec", "object_name", "object_address", "object_hostname", "object_profile", "object_admdomain", "object_platform", "object_version", "alarm_class", "alarm_subject", "maintenance", "objects", "subscribers", "tt", "escalation_ts", "location", "container_address", ] + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] + ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]) header_row = ( [ "ID", _("ROOT_ID"), _("FROM_TS"), _("TO_TS"), _("DURATION_SEC"), _("OBJECT_NAME"), _("OBJECT_ADDRESS"), _("OBJECT_HOSTNAME"), _("OBJECT_PROFILE"), _("OBJECT_ADMDOMAIN"), _("OBJECT_PLATFORM"), _("OBJECT_VERSION"), _("ALARM_CLASS"), _("ALARM_SUBJECT"), _("MAINTENANCE"), _("OBJECTS"), _("SUBSCRIBERS"), _("TT"), _("ESCALATION_TS"), _("LOCATION"), _("CONTAINER_ADDRESS"), ] + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] + ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]) if columns: cmap = [] for c in columns.split(","): try: cmap += [cols.index(c)] except ValueError: continue else: cmap = list(range(len(cols))) subscribers_profile = self.default_subscribers_profile if subscribers: subscribers_profile = set( SubscriberProfile.objects.filter( id__in=subscribers.split(",")).scalar("id")) r = [translate_row(header_row, cmap)] fd = datetime.datetime.strptime( to_date, "%d.%m.%Y") + datetime.timedelta(days=1) match = { "timestamp": { "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"), "$lte": fd } } match_duration = {"duration": {"$gte": min_duration}} if max_duration: match_duration = { "duration": { "$gte": min_duration, "$lte": max_duration } } mos = ManagedObject.objects.filter(is_managed=True) if segment: try: match["segment_path"] = bson.ObjectId(segment) except bson.errors.InvalidId: pass ads = [] if administrative_domain: if administrative_domain.isdigit(): administrative_domain = [int(administrative_domain)] ads = AdministrativeDomain.get_nested_ids( administrative_domain[0]) if not request.user.is_superuser: user_ads = UserAccess.get_domains(request.user) if administrative_domain and ads: if administrative_domain[0] not in user_ads: ads = list(set(ads) & set(user_ads)) if not ads: return HttpResponse( "<html><body>Permission denied: Invalid Administrative Domain</html></body>" ) else: ads = user_ads if ads: mos = mos.filter(administrative_domain__in=ads) if selector: selector = ManagedObjectSelector.get_by_id(int(selector)) mos = mos.filter(selector.Q) if 
ex_selector: ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector)) mos = mos.exclude(ex_selector.Q) # Working if Administrative domain set if ads: try: match["adm_path"] = {"$in": ads} # @todo More 2 level hierarhy except bson.errors.InvalidId: pass mos_id = list(mos.order_by("id").values_list("id", flat=True)) mo_hostname = {} maintenance = [] if mos_id and (selector or ex_selector): match["managed_object"] = {"$in": mos_id} if "maintenance" in columns.split(","): maintenance = Maintenance.currently_affected() if "object_hostname" in columns.split(","): mo_hostname = ReportObjectsHostname1(sync_ids=mos_id) mo_hostname = mo_hostname.get_dictionary() moss = ReportAlarmObjects(mos_id).get_all() # container_lookup = ReportContainer(mos_id) container_lookup = None subject = "alarm_subject" in columns loc = AlarmApplication([]) if source in ["archive", "both"]: # Archived Alarms for a in (ArchivedAlarm._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED ).aggregate([ { "$match": match }, { "$addFields": { "duration": { "$divide": [ { "$subtract": ["$clear_timestamp", "$timestamp"] }, 1000, ] } } }, { "$match": match_duration }, # {"$sort": {"timestamp": 1}} ])): if int(a["managed_object"]) not in moss: continue dt = a["clear_timestamp"] - a["timestamp"] duration = int(dt.total_seconds()) total_objects = sum(ss["summary"] for ss in a["total_objects"]) if min_objects and total_objects < min_objects: continue total_subscribers = sum( ss["summary"] for ss in a["total_subscribers"] if subscribers_profile and ss["profile"] in subscribers_profile) if min_subscribers and total_subscribers < min_subscribers: continue if "segment_" in columns.split( ",") or "container_" in columns.split(","): path = ObjectPath.get_path(a["managed_object"]) if path: segment_path = [ NetworkSegment.get_by_id(s).name for s in path.segment_path if NetworkSegment.get_by_id(s) ] container_path = [ Object.get_by_id(s).name for s in path.container_path if Object.get_by_id(s) ] else: segment_path = [] container_path = [] else: segment_path = [] container_path = [] r += [ translate_row( row( [ smart_text(a["_id"]), smart_text(a["root"]) if a.get("root") else "", a["timestamp"], a["clear_timestamp"], smart_text(duration), moss[a["managed_object"]][0], moss[a["managed_object"]][1], smart_text( mo_hostname.get(a["managed_object"], "")), Profile.get_by_id( moss[a["managed_object"]][3]).name if moss[a["managed_object"]][5] else "", moss[a["managed_object"]][6], Platform.get_by_id( moss[a["managed_object"]][9]) if moss[a["managed_object"]][9] else "", smart_text( Firmware.get_by_id( moss[a["managed_object"]][10]).version) if moss[a["managed_object"]][10] else "", AlarmClass.get_by_id(a["alarm_class"]).name, ArchivedAlarm.objects.get( id=a["_id"]).subject if subject else "", "", total_objects, total_subscribers, a.get("escalation_tt"), a.get("escalation_ts"), ", ".join(ll for ll in ( loc.location(moss[a["managed_object"]][5] ) if moss[a["managed_object"]] [5] is not None else "") if ll), container_lookup[a["managed_object"]].get( "text", "") if container_lookup else "", ], container_path, segment_path, ), cmap, ) ] # Active Alarms if source in ["active", "both"]: datenow = datetime.datetime.now() for a in (ActiveAlarm._get_collection().with_options( read_preference=ReadPreference.SECONDARY_PREFERRED). 
aggregate([ { "$match": match }, { "$addFields": { "duration": { "$divide": [{ "$subtract": [fd, "$timestamp"] }, 1000] } } }, { "$match": match_duration }, # {"$sort": {"timestamp": 1}} ])): dt = datenow - a["timestamp"] duration = int(dt.total_seconds()) total_objects = sum(ss["summary"] for ss in a["total_objects"]) if min_objects and total_objects < min_objects: continue total_subscribers = sum( ss["summary"] for ss in a["total_subscribers"] if subscribers_profile and ss["profile"] in subscribers_profile) if min_subscribers and total_subscribers < min_subscribers: continue if "segment_" in columns.split( ",") or "container_" in columns.split(","): path = ObjectPath.get_path(a["managed_object"]) if path: segment_path = [ NetworkSegment.get_by_id(s).name for s in path.segment_path if NetworkSegment.get_by_id(s) ] container_path = [ Object.get_by_id(s).name for s in path.container_path if Object.get_by_id(s) ] else: segment_path = [] container_path = [] else: segment_path = [] container_path = [] r += [ translate_row( row( [ smart_text(a["_id"]), smart_text(a["root"]) if a.get("root") else "", a["timestamp"], # a["clear_timestamp"], "", smart_text(duration), moss[a["managed_object"]][0], moss[a["managed_object"]][1], smart_text( mo_hostname.get(a["managed_object"], "")), Profile.get_by_id(moss[a["managed_object"]][3]) if moss[a["managed_object"]][5] else "", moss[a["managed_object"]][6], Platform.get_by_id( moss[a["managed_object"]][9]) if moss[a["managed_object"]][9] else "", smart_text( Firmware.get_by_id( moss[a["managed_object"]][10]).version) if moss[a["managed_object"]][10] else "", AlarmClass.get_by_id(a["alarm_class"]).name, ActiveAlarm.objects.get( id=a["_id"]).subject if subject else None, "Yes" if a["managed_object"] in maintenance else "No", total_objects, total_subscribers, a.get("escalation_tt"), a.get("escalation_ts"), ", ".join(ll for ll in ( loc.location(moss[a["managed_object"]][5] ) if moss[a["managed_object"]] [5] is not None else "") if ll), container_lookup[a["managed_object"]].get( "text", "") if container_lookup else "", ], container_path, segment_path, ), cmap, ) ] if source in ["long_archive"]: o_format = "csv_zip" columns = [ "ALARM_ID", "MO_ID", "OBJECT_PROFILE", "VENDOR", "PLATFORM", "VERSION", "OPEN_TIMESTAMP", "CLOSE_TIMESTAMP", "LOCATION", "", "POOL", "ADM_DOMAIN", "MO_NAME", "IP", "ESCALATION_TT", "DURATION", "SEVERITY", "REBOOTS", ] from noc.core.clickhouse.connect import connection ch = connection() fd = datetime.datetime.strptime(from_date, "%d.%m.%Y") td = datetime.datetime.strptime( to_date, "%d.%m.%Y") + datetime.timedelta(days=1) if td - fd > datetime.timedelta(days=390): return HttpResponseBadRequest( _("Report more than 1 year not allowed. 
If nedeed - request it from Administrator" )) ac = AlarmClass.objects.get( name="NOC | Managed Object | Ping Failed") subs = ", ".join( "subscribers.summary[indexOf(subscribers.profile, '%s')] as `%s`" % (sp.bi_id, sp.name) for sp in SubscriberProfile.objects.filter().order_by("name")) if subs: columns += [ sp.name for sp in SubscriberProfile.objects.filter().order_by("name") ] r = ch.execute(LONG_ARCHIVE_QUERY % ( ", %s" % subs if subs else "", fd.date().isoformat(), td.date().isoformat(), ac.bi_id, )) filename = "alarms.csv" if o_format == "csv": response = HttpResponse(content_type="text/csv") response[ "Content-Disposition"] = 'attachment; filename="%s"' % filename writer = csv.writer(response) writer.writerows(r) return response elif o_format == "csv_zip": response = BytesIO() f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8") writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"') writer.writerow(columns) writer.writerows(r) f.seek(0) with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf: zf.writestr(filename, f.read()) zf.filename = "%s.zip" % filename # response = HttpResponse(content_type="text/csv") response.seek(0) response = HttpResponse(response.getvalue(), content_type="application/zip") response[ "Content-Disposition"] = 'attachment; filename="%s.zip"' % filename return response elif o_format == "xlsx": response = BytesIO() wb = xlsxwriter.Workbook(response) cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1}) ws = wb.add_worksheet("Alarms") max_column_data_length = {} for rn, x in enumerate(r): for cn, c in enumerate(x): if rn and (r[0][cn] not in max_column_data_length or len(str(c)) > max_column_data_length[r[0][cn]]): max_column_data_length[r[0][cn]] = len(str(c)) ws.write(rn, cn, c, cf1) ws.autofilter(0, 0, rn, cn) ws.freeze_panes(1, 0) for cn, c in enumerate(r[0]): # Set column width width = get_column_width(c) if enable_autowidth and width < max_column_data_length[c]: width = max_column_data_length[c] ws.set_column(cn, cn, width=width) wb.close() response.seek(0) response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel") response[ "Content-Disposition"] = 'attachment; filename="alarms.xlsx"' response.close() return response
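The long_archive branch builds one extra ClickHouse column per subscriber profile via indexOf; a small illustration of how those expressions expand (profile names and bi_ids are illustrative):

profiles = [("PPPoE", 1234), ("IPTV", 5678)]  # (name, bi_id) pairs, illustrative
subs = ", ".join(
    "subscribers.summary[indexOf(subscribers.profile, '%s')] as `%s`" % (bi_id, name)
    for name, bi_id in profiles
)
# subs == "subscribers.summary[indexOf(subscribers.profile, '1234')] as `PPPoE`, "
#         "subscribers.summary[indexOf(subscribers.profile, '5678')] as `IPTV`"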