def get_object(cls, id):
    """
    Build trap-collector datastream record for Managed Object ``id``.

    :param id: ManagedObject id
    :return: dict with id, pool, fm_pool, addresses and trap_community
    :raises KeyError: when the object is missing, unmanaged, has event
        processing effectively disabled, or yields no usable trap source
        addresses
    """
    mo = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "is_managed",
        "pool",
        "fm_pool",
        "address",
        "trap_community",
        "trap_source_ip",
        "trap_source_type",
        "event_processing_policy",
        "object_profile__event_processing_policy",
    )[:1]
    if not mo:
        raise KeyError()
    (
        mo_id,
        is_managed,
        pool,
        fm_pool,
        address,
        trap_community,
        trap_source_ip,
        trap_source_type,
        event_processing_policy,
        mop_event_processing_policy,
    ) = mo[0]
    # Normalize policy/source markers once, consistently (the original
    # mixed str(...) comparisons with bare ones)
    event_processing_policy = str(event_processing_policy)
    mop_event_processing_policy = str(mop_event_processing_policy)
    trap_source_type = str(trap_source_type)
    # Process event policy: "D" disables, "P" defers to the object profile
    if (
        not is_managed
        or event_processing_policy == "D"
        or (event_processing_policy == "P" and mop_event_processing_policy != "E")
        or trap_source_type == "d"
    ):
        raise KeyError()
    # Process trap sources
    pool = str(Pool.get_by_id(pool).name)
    r = {
        "id": str(mo_id),
        "pool": pool,
        # Fall back to the main pool when no dedicated FM pool is set
        "fm_pool": str(Pool.get_by_id(fm_pool).name) if fm_pool else pool,
        "addresses": [],
        "trap_community": trap_community,
    }
    if trap_source_type == "m" and address:
        # Managed Object's address
        r["addresses"] = [str(address)]
    elif trap_source_type == "s" and trap_source_ip:
        # Trap source set manually
        r["addresses"] = [str(trap_source_ip)]
    elif trap_source_type == "l":
        # Loopback address
        r["addresses"] = cls._get_loopback_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    elif trap_source_type == "a":
        # All interface addresses
        r["addresses"] = cls._get_all_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    else:
        raise KeyError()
    return r
def handle_mirror(self, split=False, path=None, *args, **options):
    """
    Mirror VCS-stored device configs to a directory tree on disk.

    :param split: when True, group mirrored files into per-pool subdirectories
    :param path: mirror root directory
    """
    from noc.sa.models.managedobject import ManagedObject
    from noc.main.models.pool import Pool

    mirror = os.path.realpath(path)
    self.print("Mirroring to %s" % path)
    if self.repo == "config":
        for o_id, address, pool in self.progress(
            ManagedObject.objects.filter().values_list("id", "address", "pool")
        ):
            pool = Pool.get_by_id(pool)
            data = self.vcs.get(self.clean_id(o_id))
            if not data:
                continue
            if split:
                mpath = os.path.realpath(os.path.join(mirror, str(pool), str(address)))
            else:
                mpath = os.path.realpath(os.path.join(mirror, str(address)))
            # Path-traversal guard: never write outside the mirror root
            if mpath.startswith(mirror):
                safe_rewrite(mpath, data)
            else:
                # Fix: original applied % to a format-less string
                # (" !!! mirror path violation for" % address -> TypeError)
                self.print(" !!! mirror path violation for %s" % address)
    self.print("Done")
def get_task_count():
    """
    Calculate discovery tasks per pool and discovery interval.

    :return: mapping pool -> {interval: object count,
             "sum_task_per_seconds": float, "box_task_per_seconds": float,
             "periodic_task_per_seconds": float}; the "all" key aggregates
             rates over every pool
    """
    from django.db import connection

    cursor = connection.cursor()
    SQL = """SELECT mo.pool, mop.%s_discovery_interval, count(*)
    FROM sa_managedobject mo, sa_managedobjectprofile mop
    WHERE mo.object_profile_id = mop.id
      and mop.enable_%s_discovery = true
      and mo.is_managed = true
    GROUP BY mo.pool, mop.%s_discovery_interval;
    """
    r = defaultdict(dict)
    r["all"]["sum_task_per_seconds"] = 0.0
    r["all"]["box_task_per_seconds"] = 0.0
    r["all"]["periodic_task_per_seconds"] = 0.0
    for s in ("box", "periodic"):
        cursor.execute(SQL % (s, s, s))
        for pool_id, interval, count in cursor.fetchall():
            p = Pool.get_by_id(pool_id)
            r[p][interval] = count
            r[p].setdefault("sum_task_per_seconds", 0.0)
            r[p].setdefault("%s_task_per_seconds" % s, 0.0)
            if not interval:
                # Fix: a zero/NULL discovery interval previously raised
                # ZeroDivisionError; skip rate accounting for such rows
                continue
            rate = float(count) / float(interval)
            r[p]["sum_task_per_seconds"] += rate
            r[p]["%s_task_per_seconds" % s] += rate
            r["all"]["sum_task_per_seconds"] += rate
            r["all"]["%s_task_per_seconds" % s] += rate
    return r
def get_data(self, request, pool=None, obj_profile=None, **kwargs):
    """
    Build report rows: one "Main" row per managed object listing its
    capabilities, plus one row per non-SVI interface listing its
    enabled protocols.

    :param pool: optional Pool id; defaults to the "default" pool
    :param obj_profile: optional object profile to filter by
    """
    data = []
    pool = Pool.get_by_id(pool) if pool else Pool.get_by_name("default")
    # Get all managed objects, limited to the user's administrative
    # domains for non-superusers (chained filter replaces the original
    # duplicated queryset construction)
    mos = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        mos = mos.filter(object_profile=obj_profile)
    columns = (_("Managed Object"), _("Address"), _("Object"), _("Capabilities"))
    for mo in mos:
        # Fix: original called mo.get_caps() twice per object and
        # discarded the first result
        caps = mo.get_caps()
        data += [(mo.name, mo.address, _("Main"), ";".join(caps))]
        for i in Interface.objects.filter(managed_object=mo):
            if i.type == "SVI":
                continue
            data += [(mo.name, mo.address, i.name, ";".join(i.enabled_protocols))]
    return self.from_dataset(title=self.title, columns=columns, data=data)
def get_object(cls, id):
    """
    Build ping-probe datastream record for Managed Object ``id``.

    :param id: ManagedObject id
    :return: dict describing address, probe timing and reporting options
    :raises KeyError: when the object is missing, unmanaged, has no
        address, or ping is disabled / has a non-positive interval
    """
    rows = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "name",
        "bi_id",
        "is_managed",
        "pool",
        "address",
        "time_pattern",
        "object_profile__enable_ping",
        "object_profile__ping_interval",
        "object_profile__ping_policy",
        "object_profile__ping_size",
        "object_profile__ping_count",
        "object_profile__ping_timeout_ms",
        "object_profile__report_ping_rtt",
        "object_profile__report_ping_attempts",
    )[:1]
    if not rows:
        raise KeyError()
    (
        mo_id,
        name,
        bi_id,
        is_managed,
        pool,
        address,
        time_pattern,
        enable_ping,
        ping_interval,
        ping_policy,
        ping_size,
        ping_count,
        ping_timeout_ms,
        report_ping_rtt,
        report_ping_attempts,
    ) = rows[0]
    # Ping requires a managed object with an address and ping enabled
    if not is_managed or not address or not enable_ping:
        raise KeyError()
    # Interval must be a positive number
    if not ping_interval or ping_interval < 0:
        raise KeyError()
    r = {
        "id": str(mo_id),
        "pool": str(Pool.get_by_id(pool).name),
        "address": str(address),
        "interval": ping_interval,
        "policy": ping_policy,
        "size": ping_size,
        "count": ping_count,
        "timeout": ping_timeout_ms,
        "report_rtt": report_ping_rtt,
        "report_attempts": report_ping_attempts,
        "status": None,
        "name": name,
        "bi_id": bi_id,
    }
    if time_pattern:
        r["time_expr"] = TimePattern.get_code(time_pattern)
    return r
def get_object(cls, id):
    """
    Build syslog-collector datastream record for Managed Object ``id``.

    :param id: ManagedObject id
    :return: dict with id, bi_id, pool, source addresses and effective
        processing/archiving flags
    :raises KeyError: when the object is missing, unmanaged, cannot act
        as a syslog source, or yields no usable source addresses
    """
    rows = ManagedObject.objects.filter(id=id).values_list(
        "id",
        "bi_id",
        "is_managed",
        "pool",
        "address",
        "syslog_source_ip",
        "syslog_source_type",
        "event_processing_policy",
        "object_profile__event_processing_policy",
        "syslog_archive_policy",
        "object_profile__syslog_archive_policy",
    )[:1]
    if not rows:
        raise KeyError()
    (
        mo_id,
        bi_id,
        is_managed,
        pool,
        address,
        syslog_source_ip,
        syslog_source_type,
        event_processing_policy,
        mop_event_processing_policy,
        syslog_archive_policy,
        mop_syslog_archive_policy,
    ) = rows[0]
    # Check if object capable to receive syslog events
    if not is_managed or str(syslog_source_type) == "d":
        raise KeyError()

    def _effective(policy, profile_policy):
        # "E" enables directly; "P" defers to the object profile's policy
        policy = str(policy)
        return policy == "E" or (policy == "P" and str(profile_policy) == "E")

    process_events = _effective(event_processing_policy, mop_event_processing_policy)
    archive_events = _effective(syslog_archive_policy, mop_syslog_archive_policy)
    # At least one of processing/archiving must be enabled
    if not process_events and not archive_events:
        raise KeyError()
    # Process syslog sources
    r = {
        "id": str(mo_id),
        "bi_id": str(bi_id),
        "pool": str(Pool.get_by_id(pool).name),
        "addresses": [],
        "process_events": process_events,
        "archive_events": archive_events,
    }
    if syslog_source_type == "m" and address:
        # Managed Object's address
        r["addresses"] = [str(address)]
    elif syslog_source_type == "s" and syslog_source_ip:
        # Syslog source set manually
        r["addresses"] = [str(syslog_source_ip)]
    elif syslog_source_type == "l":
        # Loopback address
        r["addresses"] = cls._get_loopback_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    elif syslog_source_type == "a":
        # All interface addresses
        r["addresses"] = cls._get_all_addresses(mo_id)
        if not r["addresses"]:
            raise KeyError()
    else:
        raise KeyError()
    return r
def extra_query(self, q, order):
    """
    Translate request filter parameters into an extra query dict.

    :param q: mapping of request parameters ("status", "ldur", "pool")
    :param order: unused here; kept for interface compatibility
    :return: (extra filter dict, empty sort list)
    """
    extra = {}
    # (request key, query key, value transform)
    for param, key, transform in (
        ("status", "s", lambda v: v),
        ("ldur", "ldur", lambda v: {"$gte": int(v)}),
        ("pool", "pool", lambda v: Pool.get_by_id(v).name),
    ):
        if param in q:
            extra[key] = transform(q[param])
    return extra, []
def iter_metrics(self):
    """
    Yield (metric key, value) pairs with managed/unmanaged/total
    object counts per pool, labelled by pool name.
    """
    for pool_id, n_managed, n_unmanaged in self.pg_execute(self.SQL_POOL_MO):
        pool = Pool.get_by_id(pool_id)
        if not pool:
            # Skip rows referencing pools that can no longer be resolved
            continue
        label = ("pool", pool.name)
        yield ("inventory_managedobject_managed", label), n_managed
        yield ("inventory_managedobject_unmanaged", label), n_unmanaged
        yield ("inventory_managedobject_total", label), n_managed + n_unmanaged
def __init__(self, mos, avail_only=False, match=None):
    """
    :param mos: ManagedObject queryset to report on
    :type mos: ManagedObject.objects.filter()
    :param avail_only: restrict to objects currently reported available
    :param match: optional extra match expression for the pipeline
    """
    ids = list(mos.values_list("id", flat=True))
    if avail_only:
        statuses = ObjectStatus.get_statuses(ids)
        ids = [mo_id for mo_id in statuses if statuses[mo_id]]
    self.mo_ids = ids
    distinct_pools = set(mos.values_list("pool", flat=True))
    self.mos_pools = [Pool.get_by_id(p) for p in distinct_pools]
    self.coll_name = "noc.schedules.discovery.%s"
    # @todo Good way for pipelines fill
    self.pipelines = {}
    self.match = match
def get_data(self, request, pool=None, selector=None, report_type=None, **kwargs):
    """
    Build the "failed credential guess" report.

    For each problem class (not available, CLI guess failed, SNMP guess
    failed) list the matching managed objects with their credentials,
    hostname, availability and decoded discovery problem.

    :param pool: optional Pool id to restrict the report to
    :param selector: unused here — TODO confirm against caller
    :param report_type: unused here — TODO confirm against caller
    """
    data = []
    columns, columns_desr = [], []
    # (human-readable description, encoded model-filter expression);
    # the expression syntax is consumed by ReportModelFilter below
    r_map = [
        (_("Not Available"), "2is1.3isp1.3is1"),
        (_("Failed to guess CLI credentials"), "2is1.6is1.3isp0.2isp1"),
        (_("Failed to guess SNMP community"), "2is1.6is1.3isp1.3is2.1isp1"),
    ]
    for x, y in r_map:
        columns += [y]
        columns_desr += [x]
    mos = ManagedObject.objects.filter()
    if pool:
        pool = Pool.get_by_id(pool)
        mos = mos.filter(pool=pool)
        # Section header row for the selected pool
        data += [SectionRow(name=pool.name)]
    if not request.user.is_superuser:
        # Non-superusers only see objects in their administrative domains
        mos = mos.filter(
            administrative_domain__in=UserAccess.get_domains(request.user))
    mos = list(mos.values_list("id", flat=True).order_by("id"))
    mos_s = set(mos)
    report = ReportModelFilter()
    # Evaluate all filter expressions at once; result maps
    # expression -> set of matching object ids
    result = report.proccessed(",".join(columns))
    mo_hostname = ReportObjectsHostname1(sync_ids=mos)
    mo_hostname = mo_hostname.get_dictionary()
    d_result = ReportDiscoveryResult(sync_ids=mos)
    d_result = d_result.get_dictionary()
    for col in columns:
        # Only objects that both matched the expression and are in scope
        for mo_id in result[col.strip()].intersection(mos_s):
            mo = ManagedObject.get_by_id(mo_id)
            problem = self.decode_problem(d_result.get(mo_id))
            if not problem and mo_id not in d_result:
                # No discovery record at all for this object
                problem = "Discovery disabled"
            data += [(
                mo.name,
                mo.address,
                mo.administrative_domain.name,
                mo.profile.name,
                mo_hostname.get(mo.id, ""),
                mo.auth_profile if mo.auth_profile else "",
                # Credentials come from the auth profile when set,
                # otherwise from the object itself
                mo.auth_profile.user if mo.auth_profile else mo.user,
                mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro,
                _("No") if not mo.get_status() else _("Yes"),
                columns_desr[columns.index(col)],
                problem,
            )]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Administrative Domain"),
            _("Profile"),
            _("Hostname"),
            _("Auth Profile"),
            _("Username"),
            _("SNMP Community"),
            _("Avail"),
            _("Error"),
            _("Error Detail"),
        ],
        data=data,
    )
def get_data(
    self,
    request,
    pool=None,
    obj_profile=None,
    selector=None,
    avail_status=None,
    profile_check_only=None,
    failed_scripts_only=None,
    filter_pending_links=None,
    filter_none_objects=None,
    filter_view_other=None,
    **kwargs
):
    """
    Build the discovery-problems report.

    Selects managed objects by pool/selector/profile, optionally narrows
    the discovery-job match expression by the requested problem class,
    then emits one row per (object, discovery method) problem.

    :param pool: Pool id; when absent the first pool is used
    :param obj_profile: restrict to one object profile
    :param selector: object selector overriding the pool filter
    :param avail_status: when set, report only on available objects
    :param profile_check_only: only profile/credential guess problems
    :param failed_scripts_only: only script failures (not guess problems)
    :param filter_pending_links: hide pending link-discovery methods
    :param filter_none_objects: skip rows with an empty problem value
    :param filter_view_other: only objects with the generic profile
    """
    data = []
    match = None
    # Map trailing "Remote error code NNNNN" codes to readable messages
    code_map = {
        "1": "Unknown error",
        "10000": "Unspecified CLI error",
        "10005": "Connection refused",
        "10001": "Authentication failed",
        "10002": "No super command defined",
        "10003": "No super privileges",
        "10004": "SSH Protocol error",
    }
    if pool:
        pool = Pool.get_by_id(pool)
    else:
        # NOTE(review): falls back to an arbitrary first pool — confirm
        pool = Pool.objects.filter()[0]
    data += [SectionRow(name="Report by %s" % pool.name)]
    if selector:
        # Selector takes precedence over the pool filter
        mos = ManagedObject.objects.filter(selector.Q)
    else:
        mos = ManagedObject.objects.filter(pool=pool, is_managed=True)
    if not request.user.is_superuser:
        # Non-superusers only see objects in their administrative domains
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        mos = mos.filter(object_profile=obj_profile)
    if filter_view_other:
        # Generic-profile objects only, excluding profiles with ping disabled
        mnp_in = list(ManagedObjectProfile.objects.filter(enable_ping=False))
        mos = mos.filter(profile=Profile.objects.get(name=GENERIC_PROFILE)).exclude(
            object_profile__in=mnp_in
        )
    if profile_check_only:
        # Any credential/profile-guess problem in the discovery job
        match = {
            "$or": [
                {"job.problems.suggest_cli": {"$exists": True}},
                {"job.problems.suggest_snmp": {"$exists": True}},
                {"job.problems.profile.": {"$regex": "Cannot detect profile"}},
                {"job.problems.version.": {"$regex": "Remote error code 1000[1234]"}},
            ]
        }
    elif failed_scripts_only:
        # Problems present, but not credential-guess ones
        match = {
            "$and": [
                {"job.problems": {"$exists": "true", "$ne": {}}},
                {"job.problems.suggest_snmp": {"$exists": False}},
                {"job.problems.suggest_cli": {"$exists": False}},
            ]
        }
    elif filter_view_other:
        match = {"job.problems.suggest_snmp": {"$exists": False}}
    rdp = ReportDiscoveryProblem(mos, avail_only=avail_status, match=match)
    exclude_method = []
    if filter_pending_links:
        # Link-discovery methods that are merely pending, not failing
        exclude_method += ["lldp", "lacp", "cdp", "huawei_ndp"]
    for discovery in rdp:
        mo = ManagedObject.get_by_id(discovery["key"])
        for method in [x for x in discovery["job"][0]["problems"] if x not in
                       exclude_method]:
            problem = discovery["job"][0]["problems"][method]
            if filter_none_objects and not problem:
                continue
            if isinstance(problem, dict) and "" in problem:
                # Unwrap the default ("") entry of a structured problem
                problem = problem.get("", "")
            if "Remote error code" in problem:
                # Replace the numeric code with its human-readable text
                problem = code_map.get(problem.split(" ")[-1], problem)
            if isinstance(problem, six.string_types):
                # Flatten multi-line messages into a single report cell
                problem = problem.replace("\n", " ").replace("\r", " ")
            data += [
                (
                    mo.name,
                    mo.address,
                    mo.profile.name,
                    mo.administrative_domain.name,
                    _("Yes") if mo.get_status() else _("No"),
                    discovery["st"].strftime("%d.%m.%Y %H:%M") if "st" in discovery else "",
                    method,
                    problem,
                )
            ]
    return self.from_dataset(
        title=self.title,
        columns=[
            _("Managed Object"),
            _("Address"),
            _("Profile"),
            _("Administrative Domain"),
            _("Avail"),
            _("Last successful discovery"),
            _("Discovery"),
            _("Error"),
        ],
        data=data,
    )