Code example #1
 def get_neighbor(self, device_id):
     r = DiscoveryID.get_by_udld_id(device_id)
     if r:
         return ManagedObject.get_by_id(r["object"])
     else:
         return None
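This helper resolves a UDLD device ID reported by a neighbor into a ManagedObject, or returns None when the ID is not known to DiscoveryID. A minimal usage sketch, assuming a discovery check instance (here called `check`) that provides this method; the device ID string is a placeholder, not a real value:

# Hypothetical call inside a UDLD discovery check; "EXAMPLE-SW-01" is a made-up device ID.
neighbor = check.get_neighbor("EXAMPLE-SW-01")
if neighbor:
    print("Neighbor resolved:", neighbor.name)
else:
    print("Unknown UDLD device ID, link cannot be confirmed")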
Code example #2
def wipe(o):
    if not hasattr(o, "id"):
        try:
            o = ManagedObject.objects.get(id=o)
        except ManagedObject.DoesNotExist:
            return True
    log = PrefixLoggerAdapter(logger, str(o.id))
    # Wiping discovery tasks
    log.debug("Wiping discovery tasks")
    for j in [ManagedObject.BOX_DISCOVERY_JOB, ManagedObject.PERIODIC_DISCOVERY_JOB]:
        Job.remove(
            "discovery",
            j,
            key=o.id,
            pool=o.pool.name
        )
    # Wiping FM events
    log.debug("Wiping events")
    FailedEvent.objects.filter(managed_object=o.id).delete()
    ActiveEvent.objects.filter(managed_object=o.id).delete()
    ArchivedEvent.objects.filter(managed_object=o.id).delete()
    # Wiping alarms
    log.debug("Wiping alarms")
    for ac in (ActiveAlarm, ArchivedAlarm):
        for a in ac.objects.filter(managed_object=o.id):
            # Relink root causes
            my_root = a.root
            for iac in (ActiveAlarm, ArchivedAlarm):
                for ia in iac.objects.filter(root=a.id):
                    ia.root = my_root
                    ia.save()
            # Delete alarm
            a.delete()
    # Wiping MAC DB
    log.debug("Wiping MAC DB")
    MACDB._get_collection().remove({"managed_object": o.id})
    # Wiping discovery id cache
    log.debug("Wiping discovery id")
    DiscoveryID._get_collection().remove({"object": o.id})
    # Wiping interfaces, subs and links
    # Wipe links
    log.debug("Wiping links")
    for i in Interface.objects.filter(managed_object=o.id):
        # @todo: Remove aggregated links correctly
        Link.objects.filter(interfaces=i.id).delete()
    #
    log.debug("Wiping subinterfaces")
    SubInterface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping interfaces")
    Interface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping forwarding instances")
    ForwardingInstance.objects.filter(managed_object=o.id).delete()
    # Unbind from IPAM
    log.debug("Unbind from IPAM")
    for a in Address.objects.filter(managed_object=o):
        a.managed_object = None
        a.save()
    # Wipe object status
    log.debug("Wiping object status")
    ObjectStatus.objects.filter(object=o.id).delete()
    # Wipe outages
    log.debug("Wiping outages")
    Outage.objects.filter(object=o.id).delete()
    # Wipe uptimes
    log.debug("Wiping uptimes")
    Uptime.objects.filter(object=o.id).delete()
    # Wipe reboots
    log.debug("Wiping reboots")
    Reboot.objects.filter(object=o.id).delete()
    # Delete Managed Object's capabilities
    log.debug("Wiping capabilitites")
    ObjectCapabilities.objects.filter(object=o.id).delete()
    # Delete Managed Object's facts
    log.debug("Wiping facts")
    ObjectFact.objects.filter(object=o.id).delete()
    # Delete Managed Object's attributes
    log.debug("Wiping attributes")
    ManagedObjectAttribute.objects.filter(managed_object=o).delete()
    # Detach from validation rule
    log.debug("Detaching from validation rules")
    for vr in ValidationRule.objects.filter(objects_list__object=o.id):
        vr.objects_list = [x for x in vr.objects_list if x.object.id != o.id]
        if not vr.objects_list and not vr.selectors_list:
            vr.is_active = False
        vr.save()
    # Finally delete object and config
    log.debug("Finally wiping object")
    o.delete()
    log.debug("Done")
Code example #3
 def handler(self):
     self.logger.info("Checking %s topology", self.name)
     # Get segment hierarchy
     segments = set(self.object.get_nested_ids())
     # Get managed objects and id <-> bi_id mappings
     bi_map = {}  # bi_id -> mo
     for mo in ManagedObject.objects.filter(
             segment__in=[str(x) for x in segments]):
         bi_map[str(mo.bi_id)] = mo
     if not bi_map:
         self.logger.info("Empty segment tree. Skipping")
         return
     # Fetch latest MAC tables snapshots from ClickHouse
     # @todo: Apply vlan restrictions
     t0 = datetime.datetime.now() - datetime.timedelta(
         seconds=self.MAC_WINDOW)
     t0 = t0.replace(microsecond=0)
     SQL = """SELECT managed_object, mac, argMax(ts, ts), argMax(interface, ts)
     FROM mac
     WHERE
       date >= toDate('%s')
       AND ts >= toDateTime('%s')
       AND managed_object IN (%s)
     GROUP BY ts, managed_object, mac
     """ % (
         t0.date().isoformat(),
         t0.isoformat(sep=" "),
         ", ".join(bi_map),
     )
     ch = connection()
     # Fill FIB
     mtable = []  # mo_id, mac, iface, ts
     last_ts = {}  # mo -> ts
     for mo_bi_id, mac, ts, iface in ch.execute(post=SQL):
         mo = bi_map.get(mo_bi_id)
         if mo:
             mtable += [[mo, MAC(int(mac)), iface, ts]]
             last_ts[mo] = max(ts, last_ts.get(mo, ts))
     # Filter out aged MACs
     mtable = [m for m in mtable if m[3] == last_ts[m[0]]]
     # Resolve objects
     macs = set(x[1] for x in mtable)
     if not macs:
         self.logger.info("No MAC addresses collected. Stopping")
         return
     object_macs = DiscoveryID.find_objects(macs)
     if not object_macs:
         self.logger.info("Cannot resolve any MAC addresses. Stopping")
         return
     # Build FIB
     fib = {}  # object -> interface -> {seen objects}
     for mo, mac, iface, ts in mtable:
         ro = object_macs.get(mac)
         if not ro:
             continue
         if mo not in fib:
             fib[mo] = {}
         if iface in fib[mo]:
             fib[mo][iface].add(ro)
         else:
             fib[mo][iface] = {ro}
     # Find uplinks and coverage
     coverage = {}  # mo -> covered objects
     uplinks = {}  # mo -> uplink interface
     up_fib = {}  # mo -> {seen via uplinks}
     for mo in fib:
         coverage[mo] = {mo}
         for iface in fib[mo]:
             if self.is_uplink(mo, fib[mo][iface], segments):
                 uplinks[mo] = iface
                 up_fib[mo] = fib[mo][iface]
             else:
                 coverage[mo] |= fib[mo][iface]
         if mo not in uplinks:
             self.logger.info(
                 "[%s] Cannot detect uplinks. Topology may be imprecise",
                 mo.name)
     # Dump FIB
     if self.logger.isEnabledFor(logging.DEBUG):
         for mo in fib:
             self.logger.debug("%s:", mo.name)
             if mo in uplinks:
                 self.logger.debug("  * %s: %s", uplinks[mo],
                                   ", ".join(x.name for x in up_fib[mo]))
             else:
                 self.logger.debug(
                     "    Warning: No uplinks. Topology may be imprecise")
             for iface in fib[mo]:
                 self.logger.debug(
                     "    %s: %s", iface,
                     ", ".join(x.name for x in fib[mo][iface]))
     # Build topology
     for mo in fib:
         for iface in fib[mo]:
             if iface == uplinks.get(mo):
                 continue
             for ro in fib[mo][iface]:
                 cvr = coverage.get(ro)
                 if not cvr:
                     cvr = {ro}
                     coverage[ro] = cvr
                 if not fib[mo][iface] - cvr:
                     # All objects from mo:iface are seen via ro
                     uplink = uplinks.get(ro)
                     if uplink:
                         self.confirm_link(mo, iface, ro, uplink)
                         break
                     else:
                         self.logger.info(
                             "[%s] No uplinks. Cannot link to %s:%s. Topology may be imprecise",
                             ro.name,
                             mo.name,
                             iface,
                         )
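The handler above builds `fib` as a nested mapping of managed object -> interface -> set of objects seen behind that interface, then uses `coverage` and the uplink check to decide which links can be confirmed. A toy illustration of the shape of those structures, with strings standing in for ManagedObject instances (hypothetical data, not produced by the code):

# fib[mo][iface] is the set of objects whose MACs were learned on iface.
fib = {
    "access-sw": {
        "Gi0/1": {"core-sw"},         # candidate uplink towards the rest of the network
        "Gi0/2": {"cpe-1", "cpe-2"},  # downlink: only locally covered objects
    },
}
# If "Gi0/1" is classified as the uplink, the derived structures would be:
uplinks = {"access-sw": "Gi0/1"}
up_fib = {"access-sw": {"core-sw"}}
coverage = {"access-sw": {"access-sw", "cpe-1", "cpe-2"}}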
Code example #4
File: managedobject.py  Project: marcosvella/noc
    def get_data(self):

        intervals = (
            ("y", 31557617),  # 60 * 60 * 24 * 7 * 52
            ("w", 604800),  # 60 * 60 * 24 * 7
            ("d", 86400),  # 60 * 60 * 24
            ("h", 3600),  # 60 * 60
            ("m", 60),
            ("s", 1),
        )

        def display_time(seconds):
            result = []

            for name, count in intervals:
                value = seconds // count
                if value:
                    seconds -= value * count
                    if value == 1:
                        name = name.rstrip("s")
                    result.append("{}{}".format(value, name))
            return ", ".join(result[:-1])

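        # Return an OrderedDict with the keys of dct in ascending key order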
        def sortdict(dct):
            kys = sorted(dct.keys())
            res = OrderedDict()
            for x in kys:
                for k, v in dct.items():
                    if k == x:
                        res[k] = v
            return res

        def get_container_path(self):
            # Get container path
            if not self.object:
                return None
            cp = []
            if self.object.container:
                c = self.object.container.id
                while c:
                    try:
                        o = Object.objects.get(id=c)
                        # @todo: Address data
                        if o.container:
                            cp.insert(0, {"id": o.id, "name": o.name})
                        c = o.container.id if o.container else None
                    except DoesNotExist:
                        metrics["error", ("type", "no_such_object")] += 1
                        break
            return cp

        if not self.object:
            return None
        # @todo: Stage
        # @todo: Service range
        # @todo: Open TT
        now = datetime.datetime.now()
        # Get object status and uptime

        alarms = list(
            ActiveAlarm.objects.filter(managed_object=self.object.id))

        current_start = None
        duration = None
        if self.object.is_managed:
            if self.object.get_status():
                if alarms:
                    current_state = "alarm"
                else:
                    current_state = "up"
                uptime = Uptime.objects.filter(object=self.object.id,
                                               stop=None).first()
                if uptime:
                    current_start = uptime.start
            else:
                current_state = "down"
                outage = Outage.objects.filter(object=self.object.id,
                                               stop=None).first()
                if outage is not None:
                    current_start = outage.start
        else:
            current_state = "unmanaged"
        if current_start:
            duration = now - current_start

        cp = get_container_path(self)

        # MAC addresses
        macs = []
        o_macs = DiscoveryID.macs_for_object(self.object)
        if o_macs:
            for f, l in o_macs:
                if f == l:
                    macs += [f]
                else:
                    macs += ["%s - %s" % (f, l)]
        # Hostname
        hostname = ""
        did = DiscoveryID.objects.filter(object=self.object.id).first()
        if did and did.hostname:
            hostname = did.hostname
        # Links
        uplinks = set(self.object.data.uplinks)
        if len(uplinks) > 1:
            if self.object.segment.lost_redundancy:
                redundancy = "L"
            else:
                redundancy = "R"
        else:
            redundancy = "N"
        links = []
        for _link in Link.object_links(self.object):
            local_interfaces = []
            remote_interfaces = []
            remote_objects = set()
            for iface in _link.interfaces:
                if iface.managed_object.id == self.object.id:
                    local_interfaces += [iface]
                else:
                    remote_interfaces += [iface]
                    remote_objects.add(iface.managed_object)
            if len(remote_objects) == 1:
                ro = remote_objects.pop()
                if ro.id in uplinks:
                    role = "uplink"
                else:
                    role = "downlink"
                links += [{
                    "id": _link.id,
                    "role": role,
                    "local_interface": sorted(local_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_object": ro,
                    "remote_interface": sorted(remote_interfaces, key=lambda x: alnum_key(x.name)),
                    "remote_status": "up" if ro.get_status() else "down",
                }]
            links = sorted(
                links,
                key=lambda x: (x["role"] != "uplink", alnum_key(x["local_interface"][0].name)),
            )
        # Build global services summary
        service_summary = ServiceSummary.get_object_summary(self.object)

        # Interfaces
        interfaces = []

        mo = ManagedObject.objects.filter(id=self.object.id)
        mo = mo[0]

        ifaces_metrics, last_ts = get_interface_metrics(mo)
        ifaces_metrics = ifaces_metrics[mo]

        objects_metrics, last_time = get_objects_metrics(mo)
        objects_metrics = objects_metrics.get(mo)

        # Sensors
        sensors_metrics = None
        s_metrics = None
        sensors = {}
        s_meta = []
        STATUS = {0: "OK", 1: "Alarm"}
        meric_map = {}
        if mo.get_caps().get("Sensor | Controller"):
            for mc in MetricType.objects.filter(scope=MetricScope.objects.get(
                    name="Environment")):
                if meric_map:
                    meric_map["map"].update({mc.field_name: mc.name})
                else:
                    meric_map = {
                        "table_name": mc.scope.table_name,
                        "map": {
                            mc.field_name: mc.name
                        }
                    }
            sensors_metrics, last_ts = get_interface_metrics(mo, meric_map)
            sensors_metrics = sensors_metrics[mo]

        m_tp = {}
        if mo.object_profile.metrics:
            for mt in mo.object_profile.metrics:
                if mt.get("threshold_profile"):
                    threshold_profile = ThresholdProfile.get_by_id(
                        mt.get("threshold_profile"))
                    m_tp[MetricType.get_by_id(
                        mt.get("metric_type")).name] = threshold_profile
        data = {}
        meta = []
        metric_type_name = dict(MetricType.objects.filter().scalar(
            "name", "measure"))
        metric_type_field = dict(MetricType.objects.filter().scalar(
            "field_name", "measure"))
        if objects_metrics:
            for path, mres in objects_metrics.items():
                t_v = False
                for key in mres:
                    m_path = path if any(path.split("|")) else key
                    m_path = " | ".join(kk.strip() for kk in m_path.split("|"))
                    if m_tp.get(key):
                        t_v = self.get_threshold_config(
                            m_tp.get(key), int(mres[key]))
                    val = {
                        "name": m_path,
                        "type": "" if m_path == "Object | SysUptime" else metric_type_name[key],
                        "value": display_time(int(mres[key])) if m_path == "Object | SysUptime" else mres[key],
                        "threshold": t_v,
                    }
                    if data.get(key):
                        data[key] += [val]
                    else:
                        data[key] = [val]

        data = sortdict(data)
        for k, d in data.items():
            collapsed = False
            if len(d) == 1:
                collapsed = True
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
                    collapsed = True
            meta.append({
                "name": k,
                "value": d,
                "collapsed": collapsed,
                "isdanger": isdanger
            })

        for i in Interface.objects.filter(managed_object=self.object.id,
                                          type="physical"):
            load_in = "-"
            load_out = "-"
            errors_in = "-"
            errors_out = "-"
            iface_metrics = ifaces_metrics.get(str(i.name))

            if iface_metrics:
                for key, value in iface_metrics.items():
                    metric_type = metric_type_name.get(
                        key) or metric_type_field.get(key)
                    if key == "Interface | Load | In":
                        load_in = ("%s%s" %
                                   (self.humanize_speed(value, metric_type),
                                    metric_type) if value else "-")
                    if key == "Interface | Load | Out":
                        load_out = ("%s%s" %
                                    (self.humanize_speed(value, metric_type),
                                     metric_type) if value else "-")
                    if key == "Interface | Errors | In":
                        errors_in = value if value else "-"
                    if key == "Interface | Errors | Out":
                        errors_out = value if value else "-"
            interfaces += [{
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }]
            if sensors_metrics:
                s_metrics = sensors_metrics.get(str(i.name))
            if s_metrics:
                sens_metrics = []
                for i_metrics in i.profile.metrics:
                    sens_metrics.append(i_metrics.metric_type.name)
                for key, value in s_metrics.items():
                    if key not in sens_metrics:
                        continue
                    val = {
                        "name": key,
                        "type": metric_type_name[key],
                        "value": STATUS.get(value) if metric_type_name[key] == " " else value,
                        "threshold": None,
                    }
                    if sensors.get(i.name):
                        sensors[i.name] += [val]
                    else:
                        sensors[i.name] = [val]

            si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
            if len(si) == 1:
                si = si[0]
                interfaces[-1]["untagged_vlan"] = si.untagged_vlan
                interfaces[-1]["tagged_vlans"] = list_to_ranges(
                    si.tagged_vlans).replace(",", ", ")

        if sensors:
            sensors = sortdict(sensors)
            for k, d in sensors.items():
                for dd in d:
                    isdanger = False
                    if dd["threshold"]:
                        isdanger = True
                s_meta.append({"name": k, "value": d, "isdanger": isdanger})

        interfaces = sorted(interfaces, key=lambda x: alnum_key(x["name"]))
        # Resource groups
        # Service groups (i.e. server)
        static_services = set(self.object.static_service_groups)
        service_groups = []
        for rg_id in self.object.effective_service_groups:
            rg = ResourceGroup.get_by_id(rg_id)
            service_groups += [{
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }]
        # Client groups (i.e. client)
        static_clients = set(self.object.static_client_groups)
        client_groups = []
        for rg_id in self.object.effective_client_groups:
            rg = ResourceGroup.get_by_id(rg_id)
            client_groups += [{
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }]
        # @todo: Administrative domain path
        # Alarms
        alarm_list = []
        for a in alarms:
            alarm_list += [{
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber":
                    SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }]
        alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))

        # Maintenance
        maintenance = []
        for ao in AffectedObjects._get_collection().find(
                {"affected_objects": {"object": self.object.id}}):
            m = Maintenance.objects.filter(
                id=ao["maintenance"],
                is_completed=False,
                start__lte=now + datetime.timedelta(hours=1),
            ).first()
            if m:
                maintenance += [{
                    "maintenance": m,
                    "id": m.id,
                    "subject": m.subject,
                    "start": m.start,
                    "stop": m.stop,
                    "in_progress": m.start <= now,
                }]
        # Get Inventory
        inv = []
        for p in self.object.get_inventory():
            c = self.get_nested_inventory(p)
            c["name"] = p.name or self.object.name
            inv += [c]
        # Build result

        if self.object.platform is not None:
            platform = self.object.platform.name
        else:
            platform = "Unknown"
        if self.object.version is not None:
            version = self.object.version.version
        else:
            version = ""

        r = {
            "id": self.object.id,
            "object": self.object,
            "name": self.object.name,
            "address": self.object.address,
            "platform": platform,
            # self.object.platform.name if self.object.platform else "Unknown",
            "version": version,
            # self.object.version.version if self.object.version else "",
            "description": self.object.description,
            "object_profile": self.object.object_profile.id,
            "object_profile_name": self.object.object_profile.name,
            "hostname": hostname,
            "macs": ", ".join(sorted(macs)),
            "segment": self.object.segment,
            "firmware_status": FirmwarePolicy.get_status(
                self.object.platform, self.object.version),
            "firmware_recommended":
                FirmwarePolicy.get_recommended_version(self.object.platform),
            "service_summary": service_summary,
            "container_path": cp,
            "current_state": current_state,
            # Start of uptime/downtime
            "current_start": current_start,
            # Current uptime/downtime
            "current_duration": duration,
            "service_groups": service_groups,
            "client_groups": client_groups,
            "tt": [],
            "links": links,
            "alarms": alarm_list,
            "interfaces": interfaces,
            "metrics": meta,
            "sensors": s_meta,
            "maintenance": maintenance,
            "redundancy": redundancy,
            "inventory": self.flatten_inventory(inv),
            "serial_number": self.object.get_attr("Serial Number"),
            "attributes": list(
                ManagedObjectAttribute.objects.filter(managed_object=self.object.id)),
            "confdb": None,
        }
        try:
            r["confdb"] = self.object.get_confdb()
        except (SyntaxError, ValueError):
            pass
        return r
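A quick check of the nested `display_time` helper defined inside get_data() above (shown here as if it were callable directly): it renders a duration in descending units and drops the smallest one via `result[:-1]`. The input value is arbitrary:

# 90061 seconds = 1 day + 1 hour + 1 minute + 1 second
print(display_time(90061))  # -> "1d, 1h, 1m"  (the trailing seconds component is dropped)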
Code example #5
File: managedobject.py  Project: skripkar/noc
    def get_data(self):

        def get_container_path(self):
            # Get container path
            if not self.object:
                return None
            cp = []
            if self.object.container:
                c = self.object.container.id
                while c:
                    try:
                        o = Object.objects.get(id=c)
                        # @todo: Address data
                        if o.container:
                            cp.insert(0, {
                                "id": o.id,
                                "name": o.name
                            })
                        c = o.container.id if o.container else None
                    except DoesNotExist:
                        metrics["error", ("type", "no_such_object")] += 1
                        break
            return cp

        if not self.object:
            return None
        # @todo: Stage
        # @todo: Service range
        # @todo: Open TT
        now = datetime.datetime.now()
        # Get object status and uptime

        alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))

        current_start = None
        duration = None
        if self.object.is_managed:
            if self.object.get_status():
                if alarms:
                    current_state = "alarm"
                else:
                    current_state = "up"
                uptime = Uptime.objects.filter(object=self.object.id, stop=None).first()
                if uptime:
                    current_start = uptime.start
            else:
                current_state = "down"
                outage = Outage.objects.filter(object=self.object.id, stop=None).first()
                if outage is not None:
                    current_start = outage.start
        else:
            current_state = "unmanaged"
        if current_start:
            duration = now - current_start

        cp = get_container_path(self)

        # MAC addresses
        macs = []
        o_macs = DiscoveryID.macs_for_object(self.object)
        if o_macs:
            for f, l in o_macs:
                if f == l:
                    macs += [f]
                else:
                    macs += ["%s - %s" % (f, l)]

        # Links
        uplinks = set(self.object.data.uplinks)
        if len(uplinks) > 1:
            if self.object.segment.lost_redundancy:
                redundancy = "L"
            else:
                redundancy = "R"
        else:
            redundancy = "N"
        links = []
        for l in Link.object_links(self.object):
            local_interfaces = []
            remote_interfaces = []
            remote_objects = set()
            for i in l.interfaces:
                if i.managed_object.id == self.object.id:
                    local_interfaces += [i]
                else:
                    remote_interfaces += [i]
                    remote_objects.add(i.managed_object)
            if len(remote_objects) == 1:
                ro = remote_objects.pop()
                if ro.id in uplinks:
                    role = "uplink"
                else:
                    role = "downlink"
                links += [{
                    "id": l.id,
                    "role": role,
                    "local_interface": sorted(
                        local_interfaces,
                        key=lambda x: split_alnum(x.name)
                    ),
                    "remote_object": ro,
                    "remote_interface": sorted(
                        remote_interfaces,
                        key=lambda x: split_alnum(x.name)
                    ),
                    "remote_status": "up" if ro.get_status() else "down"
                }]
            links = sorted(links, key=lambda x: (x["role"] != "uplink", split_alnum(x["local_interface"][0].name)))
        # Build global services summary
        service_summary = ServiceSummary.get_object_summary(self.object)

        # Interfaces
        interfaces = []

        mo = ManagedObject.objects.filter(id=self.object.id)

        iface_metrics, last_ts = get_interface_metrics(mo[0])
        iface_metrics = iface_metrics[mo[0]]

        objects_metrics, last_time = get_objects_metrics(mo[0])
        objects_metrics = objects_metrics.get(mo[0])

        meta = ""

        metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
        metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))

        if objects_metrics is not None:
            if objects_metrics.get("") is not None:
                for key in objects_metrics.get("").keys():
                    if metric_type_name[key] in ["bytes", "bit/s", "bool"]:
                        objects_metrics.get("")[key] = {
                            "type": metric_type_name[key],
                            "value": self.humanize_speed(objects_metrics.get("")[key], metric_type_name[key])
                        }
                    else:
                        objects_metrics.get("")[key] = {
                            "type": metric_type_name[key],
                            "value": objects_metrics.get("")[key]
                        }
                meta = objects_metrics.get("")
            else:
                meta = {}

        if iface_metrics is not None:
            for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
                load_in = "-"
                load_out = "-"
                errors_in = "-"
                errors_out = "-"
                iface_get_link_name = iface_metrics.get(str(i.name))
                if iface_get_link_name is not None:
                    for key in iface_get_link_name.keys():
                        meta_type = metric_type_name.get(key) or metric_type_field.get(key)
                        iface_get_link_name[key] = {
                            "type": meta_type,
                            "value": self.humanize_speed(
                                str(iface_get_link_name[key]),
                                meta_type)
                        }
                        if key in ['Interface | Load | In',
                                   'Interface | Load | Out',
                                   'Interface | Errors | In',
                                   'Interface | Errors | Out']:
                            try:
                                load_in = iface_get_link_name['Interface | Load | In']["value"] + \
                                    iface_get_link_name['Interface | Load | In']["type"]
                                load_out = iface_get_link_name['Interface | Load | Out']["value"] + \
                                    iface_get_link_name['Interface | Load | Out']["type"]
                                errors_in = iface_get_link_name['Interface | Errors | In']["value"]
                                errors_out = iface_get_link_name['Interface | Errors | Out']["value"]
                            except TypeError:
                                pass
                else:
                    iface_get_link_name = {}

                interfaces += [{
                    "id": i.id,
                    "name": i.name,
                    "admin_status": i.admin_status,
                    "oper_status": i.oper_status,
                    "mac": i.mac or "",
                    "full_duplex": i.full_duplex,
                    "load_in": load_in,
                    "load_out": load_out,
                    "errors_in": errors_in,
                    "errors_out": errors_out,
                    "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                    "untagged_vlan": None,
                    "tagged_vlan": None,
                    "profile": i.profile,
                    "service": i.service,
                    "service_summary":
                        service_summary.get("interface").get(i.id, {})
                }]

                si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
                if len(si) == 1:
                    si = si[0]
                    interfaces[-1]["untagged_vlan"] = si.untagged_vlan
                    interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
            interfaces = sorted(interfaces, key=lambda x: split_alnum(x["name"]))
        # Resource groups
        # Service groups (i.e. server)
        static_services = set(self.object.static_service_groups)
        service_groups = []
        for rg_id in self.object.effective_service_groups:
            rg = ResourceGroup.get_by_id(rg_id)
            service_groups += [{
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services
            }]
        # Client groups (i.e. client)
        static_clients = set(self.object.static_client_groups)
        client_groups = []
        for rg_id in self.object.effective_client_groups:
            rg = ResourceGroup.get_by_id(rg_id)
            client_groups += [{
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients
            }]
        # @todo: Administrative domain path
        # Alarms
        alarm_list = []
        for a in alarms:
            alarm_list += [{
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers)},
                "alarm_class": a.alarm_class
            }]
        alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))

        # Maintenance
        maintenance = []
        for m in Maintenance.objects.filter(
            affected_objects__object=self.object.id,
            is_completed=False,
            start__lte=now + datetime.timedelta(hours=1)
        ):
            maintenance += [{
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now
            }]
        # Get Inventory
        inv = []
        for p in self.object.get_inventory():
            c = self.get_nested_inventory(p)
            c["name"] = p.name or self.object.name
            inv += [c]
        # Build result

        if self.object.platform is not None:
            platform = self.object.platform.name
        else:
            platform = "Unknown"
        if self.object.version is not None:
            version = self.object.version.version
        else:
            version = ""

        r = {
            "id": self.object.id,
            "object": self.object,
            "name": self.object.name,
            "address": self.object.address,
            "platform": platform,
            # self.object.platform.name if self.object.platform else "Unknown",
            "version": version,
            # self.object.version.version if self.object.version else "",
            "description": self.object.description,
            "object_profile": self.object.object_profile.id,
            "object_profile_name": self.object.object_profile.name,
            "macs": ", ".join(sorted(macs)),
            "segment": self.object.segment,
            "firmware_status": FirmwarePolicy.get_status(
                self.object.platform,
                self.object.version),
            "firmware_recommended":
                FirmwarePolicy.get_recommended_version(self.object.platform),
            "service_summary": service_summary,
            "container_path": cp,
            "current_state": current_state,
            # Start of uptime/downtime
            "current_start": current_start,
            # Current uptime/downtime
            "current_duration": duration,
            "service_groups": service_groups,
            "client_groups": client_groups,
            "tt": [],
            "links": links,
            "alarms": alarm_list,
            "interfaces": interfaces,
            "metrics": meta,
            "maintenance": maintenance,
            "redundancy": redundancy,
            "inventory": self.flatten_inventory(inv),
            "serial_number": self.object.get_attr("Serial Number")
        }
        return r
Code example #6
 def handle_vacuum_bulling(self, ids, *args, **kwargs):
     connect()
     for mo_id in ids:
         mo = ManagedObject.get_by_id(mo_id)
         if not mo:
             self.print("@@@ %s is not found, skipping", mo_id)
             continue
         self.print("@@@ %s (%s, %s)", mo.name, mo.address, mo.id)
         # Get interfaces suitable for bulling
         bulling_ifaces: Set[Interface] = {
             iface
             for iface in Interface.objects.filter(managed_object=mo.id)
             if not iface.profile.allow_vacuum_bulling
         }
         if not bulling_ifaces:
             self.print("No interfaces suitable for vacuum bulling")
             continue
         # Get MAC addresses for bulling
         t0 = datetime.datetime.now() - datetime.timedelta(
             seconds=self.MAC_WINDOW)
         t0 = t0.replace(microsecond=0)
         sql = self.GET_MACS_SQL % (
             mo.bi_id,
             ", ".join("'%s'" % iface.name.replace("'", "''")
                       for iface in bulling_ifaces),
             t0.date().isoformat(),
             t0.isoformat(sep=" "),
         )
         ch = connection()
         last_ts: Optional[str] = None
         all_macs: List[str] = []
         mac_iface: Dict[str, str] = {}
         for ts, iface, mac in ch.execute(post=sql):
             if last_ts is None:
                 last_ts = ts
             elif last_ts > ts:
                 continue
             m = str(MAC(int(mac)))
             all_macs += [m]
             mac_iface[m] = iface
         # Resolve MACs to known chassis-id
         mac_map = DiscoveryID.find_objects(all_macs)
         # Filter suitable rivals
         seg_ifaces: DefaultDict[NetworkSegment,
                                 Set[str]] = defaultdict(set)
         iface_segs: DefaultDict[str,
                                 Set[NetworkSegment]] = defaultdict(set)
         for mac, r_mo in mac_map.items():
             iface = mac_iface.get(mac)
             if not iface:
                 continue
             seg_ifaces[r_mo.segment].add(iface)
             iface_segs[iface].add(r_mo.segment)
         rej_ifaces: Set[str] = set()
         for seg in seg_ifaces:
             if (len(seg_ifaces[seg]) > 1
                     or seg.profile.is_persistent
                     or seg == mo.segment):
                 # Seen on multiple interfaces or persistent segment or same segment
                 rej_ifaces |= set(seg_ifaces[seg])
                 continue
         for iface in sorted(iface_segs, key=alnum_key):
             if iface in rej_ifaces:
                 continue
             for seg in iface_segs[iface]:
                 self.print("  '%s' challenging '%s' on %s" %
                            (mo.segment.name, seg.name, iface))
                 BioSegTrial.schedule_trial(seg, mo.segment)
Code example #7
File: views.py  Project: nbashev/noc
    def load(ids, ignore_profiles=None, filter_exists_link=False):
        problems = defaultdict(dict)  # id -> problem
        rg = re.compile(
            r"Pending\slink:\s(?P<local_iface>.+?)(\s-\s)(?P<remote_mo>.+?):(?P<remote_iface>\S+)",
            re.IGNORECASE,
        )
        mos_job = [
            "discovery-noc.services.discovery.jobs.box.job.BoxDiscoveryJob-%d"
            % mo_id for mo_id in ids
        ]
        n = 0
        ignored_ifaces = []
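        # Collect ids of objects that share at least one MAC in DiscoveryID with
        # another object; these are flagged as "Duplicate ID" in the detail below.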
        find = DiscoveryID._get_collection().aggregate(
            [
                {"$unwind": "$macs"},
                {"$group": {"_id": "$macs", "count": {"$sum": 1}, "mo": {"$push": "$object"}}},
                {"$match": {"count": {"$gt": 1}}},
                {"$unwind": "$mo"},
                {"$group": {"_id": "", "mos": {"$addToSet": "$mo"}}},
            ],
            allowDiskUse=True,
        )
        find = next(find)
        duplicate_macs = set(find["mos"])
        while mos_job[(0 + n):(10000 + n)]:
            job_logs = (get_db()["noc.joblog"].with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
                    {
                        "$match": {
                            "$and": [
                                {
                                    "_id": {
                                        "$in": mos_job[(0 + n):(10000 + n)]
                                    }
                                },
                                {
                                    "problems.lldp": {
                                        "$exists": True
                                    }
                                },
                            ]
                        }
                    },
                    {
                        "$project": {
                            "_id": 1,
                            "problems.lldp": 1
                        }
                    },
                ]))

            for discovery in job_logs:
                if ("RPC Error:" in discovery["problems"]["lldp"]
                        or "Unhandled exception"
                        in discovery["problems"]["lldp"]):
                    continue
                mo_id = discovery["_id"].split("-")[2]
                mo = ManagedObject.get_by_id(mo_id)
                # log.debug("%s", discovery["problems"]["lldp"])
                # print(discovery["problems"]["lldp"])
                if ignore_profiles:
                    ignored_ifaces += [
                        (mo_id, iface.name)
                        for iface in Interface.objects.filter(
                            managed_object=mo,
                            # name__in=discovery["problems"]["lldp"].keys(),
                            profile__in=ignore_profiles,
                        )
                    ]
                for iface in discovery["problems"]["lldp"]:
                    if (mo.id, iface) in ignored_ifaces:
                        continue
                    # print iface
                    if "is not found" in discovery["problems"]["lldp"][iface]:
                        _, parsed_x = discovery["problems"]["lldp"][
                            iface].split("'", 1)
                        parsed_x, _ = parsed_x.rsplit("'", 1)
                        parsed_x = ast.literal_eval(parsed_x)
                        problems[mo.id] = {
                            iface: {
                                "problem": "Remote object is not found",
                                "detail": "Remote object not in system or ID discovery not success",
                                "remote_id": "",
                                "remote_iface": parsed_x.get("remote_port"),
                                "remote_hostname": parsed_x.get("remote_system_name"),
                                "remote_description": parsed_x.get("remote_system_description"),
                                "remote_chassis": parsed_x.get("remote_chassis_id"),
                            }
                        }
                    if "Pending link:" in discovery["problems"]["lldp"][iface]:
                        pend_str = rg.match(
                            discovery["problems"]["lldp"][iface])
                        try:
                            rmo = ManagedObject.objects.get(
                                name=pend_str.group("remote_mo"))
                        except ManagedObject.DoesNotExist:
                            continue
                        if (filter_exists_link and Link.objects.filter(
                                linked_objects=[mo.id, rmo.id]).first()):
                            # If already linked on other proto
                            continue
                        detail = ""
                        if mo.id in duplicate_macs or rmo.id in duplicate_macs:
                            detail = "Duplicate ID"
                        # mo = mos_id.get(mo_id, ManagedObject.get_by_id(mo_id))
                        problems[mo.id][iface] = {
                            "problem": "Not found iface on remote",
                            "detail": detail,
                            "remote_id": "%s::: %s" % (rmo.name, rmo.profile.name),
                            "remote_iface": pend_str.group("remote_iface"),
                        }
                        problems[rmo.id][pend_str.group("remote_iface")] = {
                            "problem": "Not found local iface on remote",
                            "detail": detail,
                            "remote_id": "%s::: %s" % (mo.name, mo.profile.name),
                            "remote_iface": iface,
                        }
                        # print(discovery["problems"]["lldp"])
            n += 10000
        return problems
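load() returns a dict keyed by managed object id and then by local interface name, so a report view can iterate it directly. A sketch of how the result might be consumed; the ids and the print formatting are illustrative only, not part of the view:

# Hypothetical consumption of the structure built above (1234 and 5678 are placeholder ids).
problems = load(ids=[1234, 5678], filter_exists_link=True)
for mo_id, ifaces in problems.items():
    for iface, p in ifaces.items():
        print(mo_id, iface, p["problem"], p.get("remote_iface"), p.get("detail", ""))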
Code example #8
File: managedobject.py  Project: fossabot/noc
def wipe(o):
    if o.profile_name.startswith("NOC."):
        return True
    log = PrefixLoggerAdapter(logger, str(o.id))
    # Delete active map tasks
    log.debug("Wiping MAP tasks")
    MapTask.objects.filter(managed_object=o).delete()
    # Wiping discovery tasks
    log.debug("Wiping discovery tasks")
    db = get_db()
    db.noc.schedules.inv.discovery.remove({"key": o.id})
    # Wiping FM events
    log.debug("Wiping events")
    NewEvent.objects.filter(managed_object=o.id).delete()
    FailedEvent.objects.filter(managed_object=o.id).delete()
    ActiveEvent.objects.filter(managed_object=o.id).delete()
    ArchivedEvent.objects.filter(managed_object=o.id).delete()
    # Wiping alarms
    log.debug("Wiping alarms")
    for ac in (ActiveAlarm, ArchivedAlarm):
        for a in ac.objects.filter(managed_object=o.id):
            # Relink root causes
            my_root = a.root
            for iac in (ActiveAlarm, ArchivedAlarm):
                for ia in iac.objects.filter(root=a.id):
                    ia.root = my_root
                    ia.save()
            # Delete alarm
            a.delete()
    # Wiping MAC DB
    log.debug("Wiping MAC DB")
    MACDB._get_collection().remove({"managed_object": o.id})
    # Wiping pending link check
    log.debug("Wiping pending link check")
    PendingLinkCheck._get_collection().remove({"local_object": o.id})
    PendingLinkCheck._get_collection().remove({"remote_object": o.id})
    # Wiping discovery id cache
    log.debug("Wiping discovery id")
    DiscoveryID._get_collection().remove({"object": o.id})
    # Wiping interfaces, subs and links
    # Wipe links
    log.debug("Wiping links")
    for i in Interface.objects.filter(managed_object=o.id):
        # @todo: Remove aggregated links correctly
        Link.objects.filter(interfaces=i.id).delete()
    #
    log.debug("Wiping subinterfaces")
    SubInterface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping interfaces")
    Interface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping forwarding instances")
    ForwardingInstance.objects.filter(managed_object=o.id).delete()
    # Unbind from IPAM
    log.debug("Unbind from IPAM")
    for a in Address.objects.filter(managed_object=o):
        a.managed_object = None
        a.save()
    # Wipe object status
    log.debug("Wiping object status")
    ObjectStatus.objects.filter(object=o.id).delete()
    # Wipe outages
    log.debug("Wiping outages")
    Outage.objects.filter(object=o.id).delete()
    # Wipe uptimes
    log.debug("Wiping uptimes")
    Uptime.objects.filter(object=o.id).delete()
    # Wipe reboots
    log.debug("Wiping reboots")
    Reboot.objects.filter(object=o.id).delete()
    # Delete Managed Object's capabilities
    log.debug("Wiping capabilitites")
    ObjectCapabilities.objects.filter(object=o.id).delete()
    # Delete Managed Object's facts
    log.debug("Wiping facts")
    ObjectFact.objects.filter(object=o.id).delete()
    # Delete Managed Object's attributes
    log.debug("Wiping attributes")
    ManagedObjectAttribute.objects.filter(managed_object=o).delete()
    # Detach from validation rule
    log.debug("Detaching from validation rules")
    for vr in ValidationRule.objects.filter(objects_list__object=o.id):
        vr.objects_list = [x for x in vr.objects_list if x.object.id != o.id]
        if not vr.objects_list and not vr.selectors_list:
            vr.is_active = False
        vr.save()
    # Finally delete object and config
    log.debug("Finally wiping object")
    o.delete()
    log.debug("Done")