def get_data(self, request, pool=None, obj_profile=None, **kwargs):
    data = []
    if pool:
        pool = Pool.get_by_id(pool)
    else:
        pool = Pool.get_by_name("default")
    # Get all managed objects
    mos = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        mos = mos.filter(object_profile=obj_profile)
    columns = (_("Managed Object"), _("Address"), _("Object"), _("Capabilities"))
    for mo in mos:
        caps = mo.get_caps()
        data += [(mo.name, mo.address, _("Main"), ";".join(caps))]
        for i in Interface.objects.filter(managed_object=mo):
            if i.type == "SVI":
                continue
            data += [(mo.name, mo.address, i.name, ";".join(i.enabled_protocols))]
    return self.from_dataset(title=self.title, columns=columns, data=data)
def get_object(cls, id):
    mo = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "is_managed",
        "pool",
        "fm_pool",
        "address",
        "trap_community",
        "trap_source_ip",
        "trap_source_type",
        "event_processing_policy",
        "object_profile__event_processing_policy",
    )[:1]
    if not mo:
        raise KeyError()
    (
        mo_id,
        is_managed,
        pool,
        fm_pool,
        address,
        trap_community,
        trap_source_ip,
        trap_source_type,
        event_processing_policy,
        mop_event_processing_policy,
    ) = mo[0]
    # Process event policy
    if (
        not is_managed
        or (str(event_processing_policy) == "P" and str(mop_event_processing_policy) != "E")
        or str(event_processing_policy) == "D"
        or str(trap_source_type) == "d"
    ):
        raise KeyError()
    # Process trap sources
    pool = str(Pool.get_by_id(pool).name)
    r = {
        "id": str(mo_id),
        "pool": pool,
        "fm_pool": str(Pool.get_by_id(fm_pool).name) if fm_pool else pool,
        "addresses": [],
        "trap_community": trap_community,
    }
    if str(trap_source_type) == "m" and address:
        r["addresses"] += [str(address)]
    elif str(trap_source_type) == "s" and trap_source_ip:
        r["addresses"] = [str(trap_source_ip)]
    elif trap_source_type == "l":
        # Loopback address
        r["addresses"] = cls._get_loopback_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    elif trap_source_type == "a":
        # All interface addresses
        r["addresses"] = cls._get_all_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    else:
        raise KeyError()
    return r
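# Usage sketch (hypothetical, not part of the original API): a trap collector
# can resolve the source configuration for a batch of object ids and simply
# skip objects for which get_object() raises KeyError, since the method uses
# KeyError both for "not found" and for "must not receive traps".
def iter_trap_sources(datasource, mo_ids):
    for mo_id in mo_ids:
        try:
            cfg = datasource.get_object(mo_id)
        except KeyError:
            continue  # Unmanaged, disabled policy or no usable addresses
        for address in cfg["addresses"]:
            yield address, cfg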
def get_report_object(self, user=None, is_managed=None, adm=None, selector=None, pool=None, segment=None, ids=None):
    # Note: the branches below rebuild the queryset from scratch,
    # so later filters override earlier ones
    mos = ManagedObject.objects.filter()
    if user.is_superuser and not adm and not selector and not segment:
        mos = ManagedObject.objects.filter()
    if ids:
        mos = ManagedObject.objects.filter(id__in=[ids])
    if is_managed is not None:
        mos = ManagedObject.objects.filter(is_managed=is_managed)
    if pool:
        p = Pool.get_by_name(pool or "default")
        mos = mos.filter(pool=p)
    if not user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(user))
    if adm:
        ads = AdministrativeDomain.get_nested_ids(int(adm))
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if segment:
        segment = NetworkSegment.objects.filter(id=segment).first()
        if segment:
            mos = mos.filter(segment__in=segment.get_nested_ids())
    return mos
def get_object(cls, id):
    mo = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "name",
        "bi_id",
        "is_managed",
        "pool",
        "address",
        "time_pattern",
        "object_profile__enable_ping",
        "object_profile__ping_interval",
        "object_profile__ping_policy",
        "object_profile__ping_size",
        "object_profile__ping_count",
        "object_profile__ping_timeout_ms",
        "object_profile__report_ping_rtt",
        "object_profile__report_ping_attempts",
    )[:1]
    if not mo:
        raise KeyError()
    (
        mo_id, name, bi_id, is_managed, pool, address, time_pattern,
        enable_ping, ping_interval, ping_policy, ping_size, ping_count,
        ping_timeout_ms, report_ping_rtt, report_ping_attempts,
    ) = mo[0]
    if not is_managed or not address or not enable_ping or not ping_interval or ping_interval < 0:
        raise KeyError()
    r = {
        "id": str(mo_id),
        "pool": str(Pool.get_by_id(pool).name),
        "address": str(address),
        "interval": ping_interval,
        "policy": ping_policy,
        "size": ping_size,
        "count": ping_count,
        "timeout": ping_timeout_ms,
        "report_rtt": report_ping_rtt,
        "report_attempts": report_ping_attempts,
        "status": None,
        "name": name,
        "bi_id": bi_id,
    }
    if time_pattern:
        r["time_expr"] = TimePattern.get_code(time_pattern)
    return r
def get_task_count():
    """
    Calculate the number of discovery tasks per pool and discovery interval
    :return:
    """
    from django.db import connection

    cursor = connection.cursor()
    SQL = """SELECT mo.pool, mop.%s_discovery_interval, count(*)
             FROM sa_managedobject mo, sa_managedobjectprofile mop
             WHERE mo.object_profile_id = mop.id
               AND mop.enable_%s_discovery = true
               AND mo.is_managed = true
             GROUP BY mo.pool, mop.%s_discovery_interval;
    """
    r = defaultdict(dict)
    r["all"]["sum_task_per_seconds"] = 0.0
    r["all"]["box_task_per_seconds"] = 0.0
    r["all"]["periodic_task_per_seconds"] = 0.0
    for s in ("box", "periodic"):
        cursor.execute(SQL % (s, s, s))
        for c in cursor.fetchall():
            p = Pool.get_by_id(c[0])
            r[p][c[1]] = c[2]
            if "sum_task_per_seconds" not in r[p]:
                r[p]["sum_task_per_seconds"] = 0.0
            if "%s_task_per_seconds" % s not in r[p]:
                r[p]["%s_task_per_seconds" % s] = 0.0
            # Tasks per second = object count / discovery interval
            r[p]["sum_task_per_seconds"] += float(c[2]) / float(c[1])
            r[p]["%s_task_per_seconds" % s] += float(c[2]) / float(c[1])
            r["all"]["sum_task_per_seconds"] += float(c[2]) / float(c[1])
            r["all"]["%s_task_per_seconds" % s] += float(c[2]) / float(c[1])
    return r
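# Worked example of the rate computation above (illustrative numbers, not
# real data): a pool with 3000 objects on a 300 s periodic interval and
# 1200 objects on a 3600 s interval contributes
#   3000 / 300 + 1200 / 3600 = 10.0 + 0.33 ~= 10.33 tasks per second
# to both its own counters and the "all" pseudo-pool.
def tasks_per_second(groups):
    # groups: iterable of (object_count, interval_seconds) pairs, mirroring
    # the rows returned by the GROUP BY query in get_task_count()
    return sum(float(count) / float(interval) for count, interval in groups)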
def handle_mirror(self, split=False, path=None, *args, **options):
    from noc.sa.models.managedobject import ManagedObject
    from noc.main.models.pool import Pool

    mirror = os.path.realpath(path)
    self.print("Mirroring to %s" % path)
    if self.repo == "config":
        for o_id, address, pool in self.progress(
            ManagedObject.objects.filter().values_list("id", "address", "pool")
        ):
            pool = Pool.get_by_id(pool)
            data = self.vcs.get(self.clean_id(o_id))
            if data:
                if split:
                    mpath = os.path.realpath(os.path.join(mirror, str(pool), str(address)))
                else:
                    mpath = os.path.realpath(os.path.join(mirror, str(address)))
                # Refuse to write outside of the mirror root
                if mpath.startswith(mirror):
                    safe_rewrite(mpath, data)
                else:
                    self.print(" !!! mirror path violation for %s" % address)
    self.print("Done")
def handle_estimate(self, device_count=None, box_interval=65400, periodic_interval=300, *args, **options):
    """
    Estimate the resources needed for discovery jobs
    :param device_count: Number of active devices
    :param box_interval: Box discovery interval (seconds)
    :param periodic_interval: Periodic discovery interval (seconds)
    :return:
    """
    if device_count:
        task_count = {
            Pool.get_by_name("default"): {
                "box_task_per_seconds": float(device_count) / float(box_interval),
                "periodic_task_per_seconds": float(device_count) / float(periodic_interval),
            }
        }
        job_avg = {
            Pool.get_by_name("default"): {
                "box": 120.0,  # Average box discovery run time (seconds)
                "periodic": 10,  # Average periodic discovery run time (seconds)
            }
        }
    else:
        task_count = self.get_task_count()
        job_avg = self.get_job_avg()
    for pool in task_count:
        if pool == "all" or not task_count[pool]:
            continue
        # Threads estimate = arrival rate (tasks/s) * average run time (s),
        # summed over box and periodic discovery
        job_count = (
            task_count[pool]["box_task_per_seconds"] * job_avg[pool].get("box", 0)
            + task_count[pool]["periodic_task_per_seconds"] * job_avg[pool].get("periodic", 0)
        )
        self.print("%20s %s" % ("Pool", "Threads est."))
        self.print("%40s %d" % (pool.name, math.ceil(job_count)))
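# Worked example of the estimate above (illustrative numbers): with
# device_count=5000 and the default intervals,
#   box rate      = 5000 / 65400 ~= 0.076 tasks/s
#   periodic rate = 5000 / 300   ~= 16.67 tasks/s
# so with the default 120 s / 10 s average run times the estimate is
#   0.076 * 120 + 16.67 * 10 ~= 175.8 -> ceil() -> 176 threads.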
def get_object(cls, id):
    mo = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "bi_id",
        "is_managed",
        "pool",
        "address",
        "syslog_source_ip",
        "syslog_source_type",
        "event_processing_policy",
        "object_profile__event_processing_policy",
        "syslog_archive_policy",
        "object_profile__syslog_archive_policy",
    )[:1]
    if not mo:
        raise KeyError()
    (
        mo_id,
        bi_id,
        is_managed,
        pool,
        address,
        syslog_source_ip,
        syslog_source_type,
        event_processing_policy,
        mop_event_processing_policy,
        syslog_archive_policy,
        mop_syslog_archive_policy,
    ) = mo[0]
    # Check if the object is capable of receiving syslog events
    if not is_managed or str(syslog_source_type) == "d":
        raise KeyError()
    # Get effective event processing policy
    event_processing_policy = str(event_processing_policy)
    mop_event_processing_policy = str(mop_event_processing_policy)
    effective_epp = event_processing_policy == "E" or (
        event_processing_policy == "P" and mop_event_processing_policy == "E"
    )
    # Get effective event archiving policy
    syslog_archive_policy = str(syslog_archive_policy)
    mop_syslog_archive_policy = str(mop_syslog_archive_policy)
    effective_sap = syslog_archive_policy == "E" or (
        syslog_archive_policy == "P" and mop_syslog_archive_policy == "E"
    )
    # Check whether syslog events may be processed at all
    if not effective_epp and not effective_sap:
        raise KeyError()
    # Process syslog sources
    r = {
        "id": str(mo_id),
        "bi_id": str(bi_id),
        "pool": str(Pool.get_by_id(pool).name),
        "addresses": [],
        "process_events": effective_epp,
        "archive_events": effective_sap,
    }
    if syslog_source_type == "m" and address:
        # Managed Object's address
        r["addresses"] += [str(address)]
    elif syslog_source_type == "s" and syslog_source_ip:
        # Syslog source set manually
        r["addresses"] = [str(syslog_source_ip)]
    elif syslog_source_type == "l":
        # Loopback address
        r["addresses"] = cls._get_loopback_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    elif syslog_source_type == "a":
        # All interface addresses
        r["addresses"] = cls._get_all_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    else:
        raise KeyError()
    return r
def extra_query(self, q, order):
    extra = {}
    if "status" in q:
        extra["s"] = q["status"]
    if "ldur" in q:
        extra["ldur"] = {"$gte": int(q["ldur"])}
    if "pool" in q:
        extra["pool"] = Pool.get_by_id(q["pool"]).name
    return extra, []
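# Example (hypothetical input): for q = {"status": "W", "ldur": "60",
# "pool": "<pool id>"} the method returns a Mongo-style filter such as
#   {"s": "W", "ldur": {"$gte": 60}, "pool": "default"}
# where the pool id is resolved to its name via Pool.get_by_id().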
def get_data(self, **kwargs):
    old = datetime.datetime.now() - datetime.timedelta(minutes=self.STALE_INTERVAL)
    data = []
    for pool in Pool._get_collection().find({}, {"_id": 0, "name": 1}):
        scheduler = Scheduler("discovery", pool=pool["name"])
        for r in scheduler.get_collection().find(
            {"runs": {"$gt": 1}, "jcls": {"$regex": "_discovery$"}, "st": {"$lte": old}}
        ):
            mo = ManagedObject.get_by_id(r["key"])
            if not mo or not mo.is_managed:
                continue
            msg = ""
            if r["tb"]:
                tb = r["tb"]
                if "text" in tb and "code" in tb:
                    if tb["text"].endswith("END OF TRACEBACK"):
                        tb["text"] = "Job crashed"
                    msg = "(%s) %s" % (tb["text"], tb["code"])
            data += [[
                mo.administrative_domain.name,
                mo.name,
                mo.profile.name,
                mo.platform.name,
                mo.version.name,
                mo.address,
                mo.segment.name,
                r["jcls"],
                humanize_distance(r["st"]),
                msg,
            ]]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Admin. Domain"),
            _("Object"),
            _("Profile"),
            _("Platform"),
            _("Version"),
            _("Address"),
            _("Segment"),
            _("Job"),
            _("Last Success"),
            _("Reason"),
        ],
        data=sorted(data),
        enumerate=True,
    )
def get_data(self, request, pool=None, int_profile=None, mop=None, avail_status=None, **kwargs):
    data = []
    mos = ManagedObject.objects.filter(is_managed=True)
    if not pool and request.user.is_superuser:
        # FIXME: remove hardcoded pool
        pool = Pool.get_by_name("STAGEMO")
    if pool:
        mos = mos.filter(pool=pool)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if mop:
        mos = mos.filter(object_profile=mop)
    mos_ids = mos.values_list("id", flat=True)
    iface = Interface.objects.filter(
        managed_object__in=mos, profile=int_profile, type="physical"
    ).values_list("managed_object", "name")
    # Build "<mo id>.<iface name>" keys for the matching interfaces
    s_iface = {"%d.%s" % (mo.id, name) for mo, name in iface}
    res = []
    n = 0
    # Process managed objects in chunks of 10000
    while mos_ids[n:10000 + n]:
        mos_ids_f = mos_ids[n:10000 + n]
        of = ObjectFact.objects.filter(
            object__in=mos_ids_f, cls="subinterface", attrs__traffic_control_broadcast=False
        )
        a_f = {".".join((str(o.object.id), o.attrs["name"])) for o in of}
        res.extend(a_f.intersection(s_iface))
        n += 10000
    for s in res:
        # Split on the first dot only: interface names may contain dots
        mo, iface = s.split(".", 1)
        mo = ManagedObject.get_by_id(mo)
        data += [(mo.name, mo.address, mo.profile.name, iface)]
    return self.from_dataset(
        title=self.title,
        columns=[_("Managed Object"), _("Address"), _("SA Profile"), _("Interface")],
        data=data,
    )
def iter_metrics(self):
    for pool_id, pool_managed, pool_unmanaged in self.pg_execute(self.SQL_POOL_MO):
        pool = Pool.get_by_id(pool_id)
        if not pool:
            continue
        yield ("inventory_managedobject_managed", ("pool", pool.name)), pool_managed
        yield ("inventory_managedobject_unmanaged", ("pool", pool.name)), pool_unmanaged
        yield ("inventory_managedobject_total", ("pool", pool.name)), pool_managed + pool_unmanaged
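# Each yielded item has the shape ((metric_name, (label, label_value)), value),
# e.g. (("inventory_managedobject_managed", ("pool", "default")), 120).
# A hypothetical consumer could render it in Prometheus text format:
def to_prometheus_line(item):
    (name, (label, label_value)), value = item
    return '%s{%s="%s"} %s' % (name, label, label_value, value)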
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.clean_map["pool"] = Pool.get_by_name
    self.clean_map["fm_pool"] = lambda x: Pool.get_by_name(x) if x else None
    self.clean_map["profile"] = Profile.get_by_name
    # Map remote group ids to local ResourceGroup ids
    self.clean_map["static_service_groups"] = lambda x: [
        str(g.id) for g in ResourceGroup.objects.filter(remote_id__in=x)
    ]
    self.clean_map["static_client_groups"] = lambda x: [
        str(g.id) for g in ResourceGroup.objects.filter(remote_id__in=x)
    ]
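# Hedged sketch of how a clean_map is typically applied (the actual base
# class may differ): each incoming field is passed through its cleaning
# callable before the record is stored; unmapped fields pass through as-is.
def clean_row(clean_map, row):
    return {k: clean_map[k](v) if k in clean_map else v for k, v in row.items()}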
def __init__(self, mos, avail_only=False, match=None):
    """
    :param mos: ManagedObject queryset
    :type mos: ManagedObject.objects.filter()
    """
    self.mo_ids = list(mos.values_list("id", flat=True))
    if avail_only:
        status = ObjectStatus.get_statuses(self.mo_ids)
        self.mo_ids = [s for s in status if status[s]]
    self.mos_pools = [Pool.get_by_id(p) for p in set(mos.values_list("pool", flat=True))]
    self.coll_name = "noc.schedules.discovery.%s"
    # @todo Find a good way to fill the pipelines
    self.pipelines = {}
    self.match = match
def get_pool(name):
    pool = Pool(name=name)
    pool.save()
    return pool
def get_data(
    self,
    request,
    pool=None,
    obj_profile=None,
    selector=None,
    avail_status=None,
    profile_check_only=None,
    failed_scripts_only=None,
    filter_pending_links=None,
    filter_none_objects=None,
    filter_view_other=None,
    **kwargs
):
    data = []
    match = None
    code_map = {
        "1": "Unknown error",
        "10000": "Unspecified CLI error",
        "10005": "Connection refused",
        "10001": "Authentication failed",
        "10002": "No super command defined",
        "10003": "No super privileges",
        "10004": "SSH Protocol error",
    }
    if pool:
        pool = Pool.get_by_id(pool)
    else:
        pool = Pool.objects.filter()[0]
    data += [SectionRow(name="Report by %s" % pool.name)]
    if selector:
        mos = ManagedObject.objects.filter(selector.Q)
    else:
        mos = ManagedObject.objects.filter(pool=pool, is_managed=True)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        mos = mos.filter(object_profile=obj_profile)
    if filter_view_other:
        mnp_in = list(ManagedObjectProfile.objects.filter(enable_ping=False))
        mos = mos.filter(profile=Profile.objects.get(name=GENERIC_PROFILE)).exclude(
            object_profile__in=mnp_in
        )
    if profile_check_only:
        match = {
            "$or": [
                {"job.problems.suggest_cli": {"$exists": True}},
                {"job.problems.suggest_snmp": {"$exists": True}},
                {"job.problems.profile.": {"$regex": "Cannot detect profile"}},
                {"job.problems.version.": {"$regex": "Remote error code 1000[1234]"}},
            ]
        }
    elif failed_scripts_only:
        match = {
            "$and": [
                {"job.problems": {"$exists": "true", "$ne": {}}},
                {"job.problems.suggest_snmp": {"$exists": False}},
                {"job.problems.suggest_cli": {"$exists": False}},
            ]
        }
    elif filter_view_other:
        match = {"job.problems.suggest_snmp": {"$exists": False}}
    rdp = ReportDiscoveryProblem(mos, avail_only=avail_status, match=match)
    exclude_method = []
    if filter_pending_links:
        exclude_method += ["lldp", "lacp", "cdp", "huawei_ndp"]
    for discovery in rdp:
        mo = ManagedObject.get_by_id(discovery["key"])
        for method in [x for x in discovery["job"][0]["problems"] if x not in exclude_method]:
            problem = discovery["job"][0]["problems"][method]
            if filter_none_objects and not problem:
                continue
            if isinstance(problem, dict) and "" in problem:
                problem = problem.get("", "")
            if "Remote error code" in problem:
                problem = code_map.get(problem.split(" ")[-1], problem)
            if isinstance(problem, six.string_types):
                problem = problem.replace("\n", " ").replace("\r", " ")
            data += [
                (
                    mo.name,
                    mo.address,
                    mo.profile.name,
                    mo.administrative_domain.name,
                    _("Yes") if mo.get_status() else _("No"),
                    discovery["st"].strftime("%d.%m.%Y %H:%M") if "st" in discovery else "",
                    method,
                    problem,
                )
            ]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Profile"),
            _("Administrative Domain"),
            _("Avail"),
            _("Last successful discovery"),
            _("Discovery"),
            _("Error"),
        ],
        data=data,
    )
def api_report(
    self,
    request,
    o_format,
    is_managed=None,
    administrative_domain=None,
    selector=None,
    pool=None,
    segment=None,
    avail_status=False,
    columns=None,
    ids=None,
    enable_autowidth=False,
):
    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, str):
                return smart_text(v)
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    type_columns = ["Up/10G", "Up/1G", "Up/100M", "Down/-", "-"]
    cols = [
        "object1_admin_domain",
        # "id",
        "object1_name",
        "object1_address",
        "object1_platform",
        "object1_segment",
        "object1_tags",
        "object1_iface",
        "object1_descr",
        "object1_speed",
        "object2_admin_domain",
        "object2_name",
        "object2_address",
        "object2_platform",
        "object2_segment",
        "object2_tags",
        "object2_iface",
        "object2_descr",
        "object2_speed",
        "link_proto",
        "last_seen",
    ]
    header_row = [
        "OBJECT1_ADMIN_DOMAIN",
        "OBJECT1_NAME",
        "OBJECT1_ADDRESS",
        "OBJECT1_PLATFORM",
        "OBJECT1_SEGMENT",
        "OBJECT1_TAGS",
        "OBJECT1_IFACE",
        "OBJECT1_DESCR",
        "OBJECT1_SPEED",
        "OBJECT2_ADMIN_DOMAIN",
        "OBJECT2_NAME",
        "OBJECT2_ADDRESS",
        "OBJECT2_PLATFORM",
        "OBJECT2_SEGMENT",
        "OBJECT2_TAGS",
        "OBJECT2_IFACE",
        "OBJECT2_DESCR",
        "OBJECT2_SPEED",
        "LINK_PROTO",
        "LAST_SEEN",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    if columns and "interface_type_count" in columns.split(","):
        r[-1].extend(type_columns)
    p = Pool.get_by_name(pool or "default")
    # Note: the branches below rebuild the queryset from scratch,
    # so later filters override earlier ones
    mos = ManagedObject.objects.filter()
    if request.user.is_superuser and not administrative_domain and not selector and not segment:
        mos = ManagedObject.objects.filter(pool=p)
    if ids:
        mos = ManagedObject.objects.filter(id__in=[ids])
    if is_managed is not None:
        mos = ManagedObject.objects.filter(is_managed=is_managed)
    if pool:
        mos = mos.filter(pool=p)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if administrative_domain:
        ads = AdministrativeDomain.get_nested_ids(int(administrative_domain))
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if segment:
        segment = NetworkSegment.objects.filter(id=segment).first()
        if segment:
            mos = mos.filter(segment__in=segment.get_nested_ids())
    mos_id = list(mos.values_list("id", flat=True))
    rld = ReportLinksDetail(mos_id)
    mo_resolv = {
        mo[0]: mo[1:]
        for mo in ManagedObject.objects.filter().values_list(
            "id",
            "administrative_domain__name",
            "name",
            "address",
            "segment",
            "platform",
            "labels",
        )
    }
    for link in rld.out:
        if len(rld.out[link]) != 2:
            # Multilink or bad link
            continue
        s1, s2 = rld.out[link]
        seg1, seg2 = None, None
        if "object1_segment" in columns.split(",") or "object2_segment" in columns.split(","):
            seg1, seg2 = mo_resolv[s1["mo"][0]][3], mo_resolv[s2["mo"][0]][3]
        plat1, plat2 = None, None
        if "object1_platform" in columns.split(",") or "object2_platform" in columns.split(","):
            plat1, plat2 = mo_resolv[s1["mo"][0]][4], mo_resolv[s2["mo"][0]][4]
        r += [
            translate_row(
                row([
                    mo_resolv[s1["mo"][0]][0],
                    mo_resolv[s1["mo"][0]][1],
                    mo_resolv[s1["mo"][0]][2],
                    "" if not plat1 else Platform.get_by_id(plat1),
                    "" if not seg1 else NetworkSegment.get_by_id(seg1),
                    ";".join(mo_resolv[s1["mo"][0]][5] or []),
                    s1["iface_n"][0],
                    s1.get("iface_descr")[0] if s1.get("iface_descr") else "",
                    s1.get("iface_speed")[0] if s1.get("iface_speed") else 0,
                    mo_resolv[s2["mo"][0]][0],
                    mo_resolv[s2["mo"][0]][1],
                    mo_resolv[s2["mo"][0]][2],
                    "" if not plat2 else Platform.get_by_id(plat2),
                    "" if not seg2 else NetworkSegment.get_by_id(seg2),
                    ";".join(mo_resolv[s2["mo"][0]][5] or []),
                    s2["iface_n"][0],
                    s2.get("iface_descr")[0] if s2.get("iface_descr") else "",
                    s2.get("iface_speed")[0] if s2.get("iface_speed") else 0,
                    s2.get("dis_method", ""),
                    s2.get("last_seen", ""),
                ]),
                cmap,
            )
        ]
    filename = "links_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr("%s.csv" % filename, f.read())
            zf.filename = "%s.csv.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.csv.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Objects")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
def get_data(self, request, pool=None, selector=None, report_type=None, **kwargs):
    data = []
    columns, columns_descr = [], []
    r_map = [
        (_("Not Available"), "2is1.3isp1.3is1"),
        (_("Failed to guess CLI credentials"), "2is1.6is1.3isp0.2isp1"),
        (_("Failed to guess SNMP community"), "2is1.6is1.3isp1.3is2.1isp1"),
    ]
    for x, y in r_map:
        columns += [y]
        columns_descr += [x]
    mos = ManagedObject.objects.filter()
    if pool:
        pool = Pool.get_by_id(pool)
        mos = mos.filter(pool=pool)
        data += [SectionRow(name=pool.name)]
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    mos = list(mos.values_list("id", flat=True).order_by("id"))
    mos_s = set(mos)
    report = ReportModelFilter()
    result = report.proccessed(",".join(columns))
    mo_hostname = ReportObjectsHostname1(sync_ids=mos)
    mo_hostname = mo_hostname.get_dictionary()
    d_result = ReportDiscoveryResult(sync_ids=mos)
    d_result = d_result.get_dictionary()
    for col in columns:
        for mo_id in result[col.strip()].intersection(mos_s):
            mo = ManagedObject.get_by_id(mo_id)
            problem = self.decode_problem(d_result.get(mo_id))
            if not problem and mo_id not in d_result:
                problem = "Discovery disabled"
            data += [(
                mo.name,
                mo.address,
                mo.administrative_domain.name,
                mo.profile.name,
                mo_hostname.get(mo.id, ""),
                mo.auth_profile if mo.auth_profile else "",
                mo.auth_profile.user if mo.auth_profile else mo.user,
                mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro,
                _("No") if not mo.get_status() else _("Yes"),
                columns_descr[columns.index(col)],
                problem,
            )]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Administrative Domain"),
            _("Profile"),
            _("Hostname"),
            _("Auth Profile"),
            _("Username"),
            _("SNMP Community"),
            _("Avail"),
            _("Error"),
            _("Error Detail"),
        ],
        data=data,
    )
def api_report(self, request, o_format, is_managed=None, administrative_domain=None,
               selector=None, pool=None, segment=None, avail_status=False, columns=None, ids=None):
    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, unicode):
                return v.encode("utf-8")
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    type_columns = ["Up/10G", "Up/1G", "Up/100M", "Down/-", "-"]
    cols = [
        "admin_domain",
        # "id",
        "object1_name",
        "object1_address",
        "object1_iface",
        "object2_name",
        "object2_address",
        "object2_iface",
        "link_proto",
        "last_seen",
    ]
    header_row = [
        "ADMIN_DOMAIN",
        "OBJECT1_NAME",
        "OBJECT1_ADDRESS",
        "OBJECT1_IFACE",
        "OBJECT2_NAME",
        "OBJECT2_ADDRESS",
        "OBJECT2_IFACE",
        "LINK_PROTO",
        "LAST_SEEN",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    if columns and "interface_type_count" in columns.split(","):
        r[-1].extend(type_columns)
    p = Pool.get_by_name(pool or "default")
    # Note: the branches below rebuild the queryset from scratch,
    # so later filters override earlier ones
    mos = ManagedObject.objects.filter()
    if request.user.is_superuser and not administrative_domain and not selector and not segment:
        mos = ManagedObject.objects.filter(pool=p)
    if ids:
        mos = ManagedObject.objects.filter(id__in=[ids])
    if is_managed is not None:
        mos = ManagedObject.objects.filter(is_managed=is_managed)
    if pool:
        mos = mos.filter(pool=p)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if administrative_domain:
        ads = AdministrativeDomain.get_nested_ids(int(administrative_domain))
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if segment:
        segment = NetworkSegment.objects.filter(id=segment).first()
        if segment:
            mos = mos.filter(segment__in=segment.get_nested_ids())
    mos_id = list(mos.values_list("id", flat=True))
    rld = ReportLinksDetail(mos_id)
    mo_resolv = dict(
        (mo[0], mo[1:])
        for mo in ManagedObject.objects.filter().values_list(
            "id", "administrative_domain__name", "name", "address"
        )
    )
    for link in rld.out:
        if len(rld.out[link]) != 2:
            # Multilink or bad link
            continue
        s1, s2 = rld.out[link]
        r += [
            translate_row(
                row([
                    mo_resolv[s1["mo"][0]][0],
                    mo_resolv[s1["mo"][0]][1],
                    mo_resolv[s1["mo"][0]][2],
                    s1["iface_n"][0],
                    mo_resolv[s2["mo"][0]][1],
                    mo_resolv[s2["mo"][0]][2],
                    s2["iface_n"][0],
                    s1.get("dis_method", ""),
                    s1.get("last_seen", ""),
                ]),
                cmap,
            )
        ]
    filename = "links_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=";")
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        with tempfile.NamedTemporaryFile(mode="wb") as f:
            wb = xlsxwriter.Workbook(f.name)
            ws = wb.add_worksheet("Objects")
            for rn, x in enumerate(r):
                for cn, c in enumerate(x):
                    ws.write(rn, cn, c)
            ws.autofilter(0, 0, rn, cn)
            wb.close()
            response = HttpResponse(content_type="application/x-ms-excel")
            response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
            with open(f.name) as ff:
                response.write(ff.read())
            return response