Ejemplo n.º 1
0
    def handle(self):
        """Collects and processes CDP neighbor data.

        The agent's CDP change timestamps decide whether the remote table
        must be re-collected; when unchanged, a locally cached copy of the
        neighbor list is used instead, falling back to a full collection
        when no local cache exists.  Finishes by storing processed-sentinels
        and saving the timestamps for the next run.
        """
        cdp = CiscoCDPMib(self.agent)

        stampcheck = yield self._get_stampcheck(cdp)
        remote_table_has_changed = yield stampcheck.is_changed()
        need_to_collect = remote_table_has_changed

        if not remote_table_has_changed:
            cache = yield self._get_cached_neighbors()
            if cache is not None:
                self._logger.debug("Using cached CDP neighbors")
                self.neighbors = cache
            else:
                # timestamps said "no change", but there is nothing cached
                # locally to fall back on, so collect anyway
                self._logger.debug(
                    "CDP cache table didn't change, but local cache was empty")
                need_to_collect = True

        if need_to_collect:
            self._logger.debug("collecting CDP cache table")
            self.neighbors = yield cdp.get_cdp_neighbors()

        if self.neighbors:
            self._logger.debug("CDP neighbors:\n %r", self.neighbors)
            yield run_in_thread(self._process_neighbors)
            # refresh the local cache so an unchanged table can be skipped
            # on the next run
            yield self._save_cached_neighbors(self.neighbors)
        else:
            self._logger.debug("No CDP neighbors to process")

        # Store sentinels to signal that CDP neighbors have been processed
        shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)
        shadows.UnrecognizedNeighbor.sentinel(self.containers, SOURCE)
        stampcheck.save()
Ejemplo n.º 2
0
 def can_handle(cls, netbox):
     """Decides whether this plugin can run for netbox: the superclass
     must accept it, and the device must have sensors."""
     verdict = yield defer.maybeDeferred(
         super(StatSensors, cls).can_handle, netbox)
     if not verdict:
         defer.returnValue(verdict)
     has_sensors = yield run_in_thread(cls._has_sensors, netbox)
     defer.returnValue(has_sensors)
Ejemplo n.º 3
0
Archivo: cdp.py Proyecto: hawken93/nav
 def _get_cached_neighbors(self):
     """Fetches the locally cached remote neighbor table, if any."""
     cached = yield run_in_thread(
         manage.NetboxInfo.cache_get, self.netbox,
         INFO_KEY_NAME, INFO_VAR_NEIGHBORS_CACHE)
     defer.returnValue(cached)
Ejemplo n.º 4
0
Archivo: cdp.py Proyecto: hawken93/nav
 def _save_cached_neighbors(self, neighbors):
     """Stores a copy of the remote neighbor table in the local cache."""
     yield run_in_thread(
         manage.NetboxInfo.cache_set, self.netbox,
         INFO_KEY_NAME, INFO_VAR_NEIGHBORS_CACHE, neighbors)
Ejemplo n.º 5
0
 def _get_cached_remote_table(self):
     """Fetches the locally cached LLDP remote table, if any."""
     cached = yield run_in_thread(
         manage.NetboxInfo.cache_get, self.netbox,
         INFO_KEY_LLDP_INFO, INFO_VAR_REMOTES_CACHE)
     defer.returnValue(cached)
Ejemplo n.º 6
0
 def _save_cached_remote_table(self, remote_table):
     """Stores a copy of the LLDP remote table in the local cache."""
     yield run_in_thread(
         manage.NetboxInfo.cache_set, self.netbox,
         INFO_KEY_LLDP_INFO, INFO_VAR_REMOTES_CACHE, remote_table)
Ejemplo n.º 7
0
    def _get_type_from_db(self):
        """Returns a deferred whose result is the NetboxType matching the
        collected sysobjectid, or None when no match exists."""
        types = manage.NetboxType.objects.filter(sysobjectid=self.sysobjectid)
        deferred = db.run_in_thread(storage.shadowify_queryset_and_commit,
                                    types)

        def _first_or_none(result):
            # an empty result set means the type is unknown to us
            return result[0] if result else None

        deferred.addCallback(_first_or_none)
        return deferred
Ejemplo n.º 8
0
    def handle(self):
        """Checks SNMP agent reachability and reports up/down transitions."""
        self._logger.debug("snmp version from db: %s", self.netbox.snmp_version)
        was_down = yield db.run_in_thread(self._currently_down)
        is_ok = yield self._do_check()

        if was_down and is_ok:
            yield self._mark_as_up()
        elif not (is_ok or was_down):
            yield self._mark_as_down()
            raise SuggestedReschedule(delay=60)
Ejemplo n.º 9
0
    def handle(self):
        """Checks SNMP agent reachability, dispatching events as needed."""
        self._logger.debug("snmp version from db: %s", self.netbox.snmp_version)
        was_down = yield db.run_in_thread(self._currently_down)
        is_ok = yield self._do_check()

        if not is_ok:
            # Always send down events; eventengine will ignore any duplicates
            yield self._mark_as_down()
            raise SuggestedReschedule(delay=60)
        if was_down:
            yield self._mark_as_up()
Ejemplo n.º 10
0
    def handle(self):
        """Fetches and processes the LLDP remote table, but only when the
        agent's table timestamps indicate it has changed."""
        lldp = LLDPMib(self.agent)
        stamps = yield self._stampcheck(lldp)
        table_changed = yield stamps.is_changed()
        if table_changed:
            self.remote = yield lldp.get_remote_table()
            if self.remote:
                self._logger.debug("LLDP neighbors:\n %s", pformat(self.remote))
            yield run_in_thread(self._process_remote)

        stamps.save()
Ejemplo n.º 11
0
    def handle(self):
        """Collects and processes CDP cache data when it has changed."""
        cdp_mib = CiscoCDPMib(self.agent)
        stamps = yield self._stampcheck(cdp_mib)
        if (yield stamps.is_changed()):
            neighbors = yield cdp_mib.get_cdp_neighbors()
            if neighbors:
                self._logger.debug("found CDP cache data: %r", neighbors)
                self.cache = neighbors
                yield run_in_thread(self._process_cache)

        stamps.save()
Ejemplo n.º 12
0
    def handle(self):
        """Collects the LLDP remote table when its change timestamps say
        there is new content, then processes it."""
        mib = LLDPMib(self.agent)
        stamper = yield self._stampcheck(mib)
        changed = yield stamper.is_changed()
        if changed:
            self.remote = yield mib.get_remote_table()
            if self.remote:
                self._logger.debug("LLDP neighbors:\n %s",
                                   pformat(self.remote))
            yield run_in_thread(self._process_remote)

        stamper.save()
Ejemplo n.º 13
0
    def handle(self):
        """Collects system statistics and ships them to Graphite."""
        if self.netbox.master:
            # virtual instances are handled via their master device
            defer.returnValue(None)
        netboxes = yield db.run_in_thread(self._get_netbox_list)

        # run each collector in order, concatenating their metric lists
        collected = []
        for collector in (self._collect_bandwidth, self._collect_cpu,
                          self._collect_sysuptime, self._collect_memory):
            collected.extend((yield collector(netboxes)))

        if collected:
            send_metrics(collected)
Ejemplo n.º 14
0
    def _get_type_from_db(self):
        """Looks up the NetboxType matching our sysobjectid; the returned
        deferred fires with the type, or None when it is unknown."""
        query = manage.NetboxType.objects.filter(
            sysobjectid=self.sysobjectid)
        return db.run_in_thread(
            storage.shadowify_queryset_and_commit, query
        ).addCallback(lambda rows: rows[0] if rows else None)
Ejemplo n.º 15
0
    def handle(self):
        """Checks each known unit's current state and reports changes."""
        units = yield db.run_in_thread(self._get_database_unit_list)

        state_map = {}
        for unit in units:
            previous = unit.up
            current = yield self._retrieve_current_unit_state(unit)
            state_map[unit] = current
            if previous != current:
                yield self._handle_state_change(unit, current)

        defer.returnValue(True)
Ejemplo n.º 16
0
    def handle(self):
        """Polls the CDP cache table and processes any neighbor entries,
        skipping collection entirely when the table is unchanged."""
        mib = CiscoCDPMib(self.agent)
        stampcheck = yield self._stampcheck(mib)
        table_changed = yield stampcheck.is_changed()
        if table_changed:
            neighbors = yield mib.get_cdp_neighbors()
            if neighbors:
                self._logger.debug("found CDP cache data: %r", neighbors)
                self.cache = neighbors
                yield run_in_thread(self._process_cache)

        stampcheck.save()
Ejemplo n.º 17
0
 def handle(self):
     """Polls all known sensors in bounded-size SNMP GET requests and
     converts the responses to metrics.
     """
     sensors = yield run_in_thread(self._get_sensors)
     self._logger.debug("retrieving data from %d sensors", len(sensors))
     # list() is required: dict views cannot be sliced on Python 3, so
     # `sensors.keys()[x:y]` would raise TypeError there
     oids = list(sensors)
     # chunk the OIDs so no single SNMP request grows too large
     requests = [
         oids[x:x + MAX_SENSORS_PER_REQUEST]
         for x in range(0, len(oids), MAX_SENSORS_PER_REQUEST)
     ]
     for req in requests:
         data = yield self.agent.get(req).addCallback(
             self._response_to_metrics, sensors)
         self._logger.debug("got data from sensors: %r", data)
Ejemplo n.º 18
0
    def handle(self):
        """Collects counters and sends them to Graphite as metric tuples."""
        if self.netbox.master:
            # virtual instance: log what is local here, then bail out
            yield self._log_instance_details()
            defer.returnValue(None)

        timestamp = time.time()
        stats = yield self._get_stats()
        netboxes = yield db.run_in_thread(self._get_netbox_list)
        metrics = list(
            self._make_metrics(stats, netboxes=netboxes, timestamp=timestamp))
        if metrics:
            self._logger.debug("Counters collected")
            send_metrics(metrics)
Ejemplo n.º 19
0
    def _log_instance_details(self):
        """Debug-logs which interfaces exist only on this instance's master
        device (i.e. not on this virtual instance itself)."""
        def _fetch():
            netbox = manage.Netbox.objects.get(id=self.netbox.id)
            own = set(netbox.interface_set.values_list('ifname', flat=True))
            masters = set(netbox.master.interface_set.values_list(
                'ifname', flat=True))
            return netbox.master.sysname, masters - own

        if not self._logger.isEnabledFor(logging.DEBUG):
            return
        master, ifcs = yield db.run_in_thread(_fetch)
        self._logger.debug("local interfaces (that do not exist on master "
                           "%s): %r", master, ifcs)
Ejemplo n.º 20
0
    def load(self):
        """Loads and unpickles previously stored timestamps from the
        NetboxInfo table.

        The deferred result (also kept on self.loaded_times) is None when
        no usable record exists.
        """
        def _unpickle():
            try:
                info = manage.NetboxInfo.objects.get(
                    netbox__id=self._get_netbox().id,
                    key=INFO_KEY_NAME, variable=self.var_name)
            except manage.NetboxInfo.DoesNotExist:
                # nothing was ever stored for this netbox/variable
                return None
            try:
                return pickle.loads(str(info.value))
            except Exception:
                # deliberately broad: a corrupt or unreadable pickle simply
                # means we have no previous timestamps to compare against
                return None

        self.loaded_times = yield db.run_in_thread(_unpickle)
        defer.returnValue(self.loaded_times)
Ejemplo n.º 21
0
    def create_new_type(self):
        """Creates a new NetboxType (and its vendor) container from the
        collected sysObjectID."""
        vendor_id = yield db.run_in_thread(get_vendor_id, self.sysobjectid)
        vendor = self.containers.factory(vendor_id, shadows.Vendor)
        vendor.id = vendor_id

        netbox_type = self.containers.factory(self.sysobjectid,
                                              shadows.NetboxType)
        netbox_type.vendor = vendor
        netbox_type.name = self.sysobjectid
        netbox_type.sysobjectid = self.sysobjectid

        # finish by filling in the agent's sysDescr as the description
        sysdescr = yield self.snmpv2_mib.get_sysDescr()
        self._logger.debug("Creating new type with descr=%r", sysdescr)
        netbox_type.description = sysdescr
Ejemplo n.º 22
0
    def on_netbox_type_changed(self, netbox_id, new_type, **kwargs):
        """Performs various cleanup and reload actions on a netbox type change
        signal.

        The netbox' data are cleaned up, and the next netbox data reload is
        scheduled to take place immediately.

        :param netbox_id: primary key of the netbox whose type changed
        :param new_type: the netbox' new type
        """
        # replaced the fragile `cond and a or b` idiom with an explicit
        # conditional; an empty cached sysname still falls back to the id
        if netbox_id in self.netboxes:
            sysname = self.netboxes[netbox_id].sysname or str(netbox_id)
        else:
            sysname = str(netbox_id)
        self._logger.info("Cancelling all jobs for %s due to type change.",
                          sysname)
        self.cancel_netbox_scheduler(netbox_id)

        df = db.run_in_thread(shadows.Netbox.cleanup_replaced_netbox,
                              netbox_id, new_type)
        # reschedule reloading regardless of the cleanup's result value
        return df.addCallback(lambda _: self._start_netbox_reload_loop())
Ejemplo n.º 23
0
    def on_netbox_type_changed(self, netbox_id, new_type, **_kwargs):
        """Performs various cleanup and reload actions on a netbox type change
        signal.

        The netbox' data are cleaned up, and the next netbox data reload is
        scheduled to take place immediately.

        :param netbox_id: primary key of the netbox whose type changed
        :param new_type: the netbox' new type
        """
        # replaced the fragile `cond and a or b` idiom with an explicit
        # conditional; an empty cached sysname still falls back to the id
        if netbox_id in self.netboxes:
            sysname = self.netboxes[netbox_id].sysname or str(netbox_id)
        else:
            sysname = str(netbox_id)
        self._logger.info("Cancelling all jobs for %s due to type change.",
                          sysname)
        self.cancel_netbox_scheduler(netbox_id)

        df = db.run_in_thread(shadows.Netbox.cleanup_replaced_netbox,
                              netbox_id, new_type)
        # reschedule reloading regardless of the cleanup's result value
        return df.addCallback(lambda _: self._start_netbox_reload_loop())
Ejemplo n.º 24
0
    def _create_new_type(self, oid):
        """Creates, saves and returns a new NetboxType for the given
        sysobjectid."""
        self._logger.debug("Creating a new type from %r", oid)
        description = yield self.snmpv2_mib.get_sysDescr()

        def _store():
            # runs in a db worker thread
            netbox_type = manage.NetboxType(
                vendor=self._get_vendor(oid),
                name=str(oid),
                sysobjectid=str(oid).strip("."),
                description=description,
            )
            netbox_type.save()
            return netbox_type

        defer.returnValue((yield db.run_in_thread(_store)))
Ejemplo n.º 25
0
Archivo: lldp.py Proyecto: wujcheng/nav
    def handle(self):
        """Collects and processes LLDP neighbor data when the remote
        table's change timestamps indicate new content, then leaves a
        processed-sentinel behind."""
        lldp = LLDPMib(self.agent)
        stampcheck = yield self._stampcheck(lldp)
        if not (yield stampcheck.is_changed()):
            self._logger.debug("LLDP remote table seems unchanged")
        else:
            self._logger.debug("collecting LLDP remote table")
            self.remote = yield lldp.get_remote_table()
            if self.remote:
                self._logger.debug("LLDP neighbors:\n %s",
                                   pformat(self.remote))
            yield run_in_thread(self._process_remote)

            # Store sentinel to signal that LLDP neighbors have been processed
            shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)

        stampcheck.save()
Ejemplo n.º 26
0
    def _load_existing_mappings(self):
        """Load the existing ARP records for this box from the db.

        Returns:

          A deferred whose result is a dictionary: { (ip, mac): arpid }
        """
        self._logger.debug("Loading open arp records from database")
        queryset = manage.Arp.objects.filter(
            netbox__id=self.netbox.id,
            end_time__gte=datetime.max).values('id', 'ip', 'mac')
        records = yield db.run_in_thread(
            storage.shadowify_queryset_and_commit, queryset)
        self._logger.debug("Loaded %d open records from arp",
                           len(records))

        defer.returnValue({
            (IP(record['ip']), record['mac']): record['id']
            for record in records
        })
Ejemplo n.º 27
0
    def handle(self):
        """Collects CDP neighbors when the device's CDP timestamps show the
        cache table has changed, then stores a processed-sentinel."""
        mib = CiscoCDPMib(self.agent)
        stamps = yield self._stampcheck(mib)
        unchanged = not (yield stamps.is_changed())
        if unchanged:
            self._logger.debug("CDP cache table seems unchanged")
        else:
            self._logger.debug("collecting CDP cache table")
            neighbors = yield mib.get_cdp_neighbors()
            if neighbors:
                self._logger.debug("found CDP cache data: %r", neighbors)
                self.cache = neighbors
                yield run_in_thread(self._process_cache)

            # Store sentinel to signal that CDP neighbors have been processed
            shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)

        stamps.save()
Ejemplo n.º 28
0
    def _load_existing_mappings(self):
        """Load the existing ARP records for this box from the db.

        Returns:

          A deferred whose result is a dictionary: { (ip, mac): arpid }
        """
        self._logger.debug("Loading open arp records from database")
        open_records = yield db.run_in_thread(
            storage.shadowify_queryset_and_commit,
            manage.Arp.objects.filter(
                netbox__id=self.netbox.id,
                end_time__gte=datetime.max).values('id', 'ip', 'mac'))
        self._logger.debug("Loaded %d open records from arp",
                           len(open_records))

        mappings = {}
        for arp in open_records:
            mappings[(IP(arp['ip']), arp['mac'])] = arp['id']
        defer.returnValue(mappings)
Ejemplo n.º 29
0
    def _log_job_externally(self, success=True):
        """Logs this job run's duration to Graphite and to the database.

        :param success: whether the job run is considered successful
        """
        duration = self.get_current_runtime()
        duration_in_seconds = (
            duration.days * 86400 + duration.seconds + duration.microseconds / 1e6
        )
        timestamp = time.time()

        def _create_record(timestamp):
            # runs in a db worker thread
            netbox = manage.Netbox.objects.get(id=self.netbox.id)
            if netbox.deleted_at:
                # skip db logging for devices queued for deletion
                _logger.info(
                    "Not logging job to db; delete of this IP device"
                    " was requested at %s",
                    netbox.deleted_at,
                )
                return

            log = manage.IpdevpollJobLog(
                netbox_id=self.netbox.id,
                job_name=self.name,
                end_time=datetime.datetime.fromtimestamp(timestamp),
                duration=duration_in_seconds,
                success=success,
                interval=self.interval,
            )
            log.save()

        def _log_to_graphite():
            prefix = metric_prefix_for_ipdevpoll_job(self.netbox.sysname, self.name)
            runtime_path = prefix + ".runtime"
            runtime = (runtime_path, (timestamp, duration_in_seconds))
            send_metrics([runtime])

        # Graphite logging is synchronous and best-effort; db logging runs
        # in a thread, and failures there must never kill the job itself
        _log_to_graphite()
        try:
            yield db.run_in_thread(_create_record, timestamp)
        except db.ResetDBConnectionError:
            pass  # this is being logged all over the place at the moment
        except Exception as error:
            _logger.warning("failed to log job to database: %s", error)
Ejemplo n.º 30
0
    def _save_container(self):
        """
        Parses the container and finds a sane storage order. We do this
        so we get ForeignKeys stored before the objects that are using them
        are stored.
        """
        @db.cleanup_django_debug_after
        def _complete_save_cycle():
            # Build the storage queue, prepare all shadow objects, write
            # them to the database, then run per-container-class cleanup
            self._populate_storage_queue()
            self._prepare_containers_for_save()
            result = self._perform_save()
            self._log_timed_result(result, "Storing to database complete")
            self._cleanup_containers_after_save()

        return db.run_in_thread(_complete_save_cycle)
Ejemplo n.º 31
0
    def save_container(self):
        """
        Parses the container and finds a sane storage order. We do this
        so we get ForeignKeys stored before the objects that are using them
        are stored.
        """
        @db.autocommit
        @db.cleanup_django_debug_after
        def _save_everything():
            # Build the storage queue, prepare all shadow objects, write
            # them to the database, then run per-container-class cleanup
            self.populate_storage_queue()
            self.prepare_containers_for_save()
            result = self.perform_save()
            self.log_timed_result(result, "Storing to database complete")
            self.cleanup_containers_after_save()

        return db.run_in_thread(_save_everything)
Ejemplo n.º 32
0
    def handle(self):
        """Gets forwarding tables from Q-BRIDGE-MIB. If that fails, reverts to
        BRIDGE-MIB, with optional community string indexing on Cisco.

        """
        fdb = yield self._get_dot1q_mac_port_mapping()
        self._log_fdb_stats("Q-BRIDGE-MIB", fdb)

        if not fdb:
            # Q-BRIDGE-MIB gave nothing; fall back to plain BRIDGE-MIB
            fdb = yield self._get_dot1d_mac_port_mapping()
            self._log_fdb_stats("BRIDGE-MIB", fdb)

        self.fdb = fdb

        # map of known MAC -> netboxid; my_macs is the subset of MACs that
        # belong to this netbox itself
        self.monitored = yield db.run_in_thread(get_netbox_macs)
        self.my_macs = set(mac for mac, netboxid in self.monitored.items()
                           if netboxid == self.netbox.id)
        self._classify_ports()
        self._store_cam_records()
        self._store_adjacency_candidates()

        self.blocking = yield self._get_dot1d_stp_blocking()
Ejemplo n.º 33
0
def log_job_externally(job_handler, success=True, interval=None):
    """Logs a finished job's runtime to the database and to Graphite.

    :param job_handler: the job handler whose run is being recorded
    :param success: whether the job run was successful
    :param interval: the job's scheduling interval
    """
    duration = job_handler.get_current_runtime()
    duration_in_seconds = (duration.days * 86400 +
                           duration.seconds +
                           duration.microseconds / 1e6)
    timestamp = time.time()

    @db.autocommit
    def _create_record(timestamp):
        # runs in a db worker thread; writes one job log row
        log = manage.IpdevpollJobLog(
            netbox_id=job_handler.netbox.id,
            job_name=job_handler.name,
            end_time=datetime.datetime.fromtimestamp(timestamp),
            duration=duration_in_seconds,
            success=success,
            interval=interval
        )
        log.save()

    def _log_to_graphite():
        prefix = metric_prefix_for_ipdevpoll_job(job_handler.netbox.sysname,
                                                 job_handler.name)
        runtime_path = prefix + ".runtime"
        runtime = (runtime_path, (timestamp, duration_in_seconds))
        send_metrics([runtime])

        counter_path = (
            prefix + (".success-count" if success else ".failure-count"))
        _COUNTERS.increment(counter_path)
        _COUNTERS.start()

    _log_to_graphite()
    try:
        yield db.run_in_thread(_create_record, timestamp)
    # "except Exception, error" is Python 2-only syntax and a SyntaxError on
    # Python 3; the "as" form works on both.  db logging stays best-effort.
    except Exception as error:
        _logger.warning("failed to log job to database: %s", error)
Ejemplo n.º 34
0
 def _update_prefix_cache(cls):
     """Reloads the prefix cache from the database, recording the reload
     time, and returns the deferred reload operation."""
     cls.prefix_cache_update_time = datetime.now()
     return db.run_in_thread(
         cls._load_prefixes_synchronously
     ).addCallback(cls._update_prefix_cache_with_result)
Ejemplo n.º 35
0
 def load_all(self):
     """Loads all netboxes from the database in a worker thread and
     returns the deferred result."""
     deferred_load = run_in_thread(self.load_all_s)
     return deferred_load
Ejemplo n.º 36
0
 def _mark_as_up(self):
     """Warns about the agent's recovery and dispatches an up event."""
     self._logger.warning("SNMP agent up again on %s", self.netbox.sysname)
     yield db.run_in_thread(self._dispatch_up_event)
Ejemplo n.º 37
0
 def _mark_as_down(self):
     """Warns about the agent's outage and dispatches a down event."""
     self._logger.warning("SNMP agent down on %s", self.netbox.sysname)
     yield db.run_in_thread(self._dispatch_down_event)
Ejemplo n.º 38
0
 def _mark_as_down(self):
     """Registers and dispatches a down event, unless this netbox is
     already registered as down."""
     if self.netbox.id in self.down_set:
         return
     self._logger.warning("SNMP agent down on %s", self.netbox.sysname)
     self.down_set.add(self.netbox.id)
     yield db.run_in_thread(self._dispatch_down_event)
Ejemplo n.º 39
0
 def _handle_state_change(self, unit, new_state):
     """Logs the unit's state transition, then updates internal state and
     posts the corresponding event."""
     previous = unit.up
     self._logger.info("%s state changed from %s to %s", unit.name, previous,
                       new_state)
     for task in (self._update_internal_state, self._post_event):
         yield db.run_in_thread(task, unit, new_state)
Ejemplo n.º 40
0
 def can_handle(cls, netbox):
     """Accepts the netbox only if the superclass accepts it and the
     device actually has interfaces."""
     parent_verdict = super(CDP, cls).can_handle(netbox)
     ifcs_verdict = yield run_in_thread(cls._has_interfaces, netbox)
     defer.returnValue(ifcs_verdict and parent_verdict)
Ejemplo n.º 41
0
 def _mark_as_up(self):
     """Unregisters this netbox as down and dispatches an up event, but
     only if it was previously registered as down."""
     if self.netbox.id not in self.down_set:
         return
     self._logger.warning("SNMP agent up again on %s",
                          self.netbox.sysname)
     self.down_set.remove(self.netbox.id)
     yield db.run_in_thread(self._dispatch_up_event)
Ejemplo n.º 42
0
 def can_handle(cls, netbox):
     """Combines the superclass verdict with an interface-existence
     check; both must hold for this plugin to run."""
     base_verdict = super(CDP, cls).can_handle(netbox)
     has_interfaces = yield run_in_thread(cls._has_interfaces, netbox)
     defer.returnValue(has_interfaces and base_verdict)