def handle(self):
    """Collects CDP neighbor data for this device.

    If the remote CDP cache table's timestamps are unchanged since the
    last run, a locally cached copy of the neighbor list is used instead
    of re-polling the device; otherwise the table is collected anew and
    the local cache refreshed.
    """
    cdp = CiscoCDPMib(self.agent)
    stampcheck = yield self._get_stampcheck(cdp)
    remote_table_has_changed = yield stampcheck.is_changed()
    need_to_collect = remote_table_has_changed

    if not remote_table_has_changed:
        cache = yield self._get_cached_neighbors()
        if cache is not None:
            self._logger.debug("Using cached CDP neighbors")
            self.neighbors = cache
        else:
            # Nothing cached locally, so we must poll after all
            self._logger.debug(
                "CDP cache table didn't change, but local cache was empty")
            need_to_collect = True

    if need_to_collect:
        self._logger.debug("collecting CDP cache table")
        self.neighbors = yield cdp.get_cdp_neighbors()

    if self.neighbors:
        self._logger.debug("CDP neighbors:\n %r", self.neighbors)
        # Processing involves blocking database access; run in a thread
        yield run_in_thread(self._process_neighbors)
        yield self._save_cached_neighbors(self.neighbors)
    else:
        self._logger.debug("No CDP neighbors to process")

    # Store sentinels to signal that CDP neighbors have been processed
    shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)
    shadows.UnrecognizedNeighbor.sentinel(self.containers, SOURCE)
    stampcheck.save()
def can_handle(cls, netbox):
    """Returns a deferred whose result says whether this plugin can run
    for netbox: the parent class must accept it, and the device must have
    sensors registered.
    """
    parent_verdict = yield defer.maybeDeferred(
        super(StatSensors, cls).can_handle, netbox)
    if not parent_verdict:
        defer.returnValue(parent_verdict)
    # Parent said yes; the final answer depends on sensor presence,
    # checked via blocking db access in a worker thread
    verdict = yield run_in_thread(cls._has_sensors, netbox)
    defer.returnValue(verdict)
def _get_cached_neighbors(self):
    """Fetches the locally cached copy of the remote neighbor table.

    Returns a deferred whose result is the cached value, or None when no
    cache entry exists.
    """
    cached = yield run_in_thread(
        manage.NetboxInfo.cache_get,
        self.netbox,
        INFO_KEY_NAME,
        INFO_VAR_NEIGHBORS_CACHE,
    )
    defer.returnValue(cached)
def _save_cached_neighbors(self, neighbors):
    """Stores neighbors as the local cached copy of the remote neighbor
    table, via blocking db access in a worker thread.
    """
    yield run_in_thread(
        manage.NetboxInfo.cache_set,
        self.netbox,
        INFO_KEY_NAME,
        INFO_VAR_NEIGHBORS_CACHE,
        neighbors,
    )
def _get_cached_remote_table(self):
    """Fetches the cached copy of the remote table from the NetboxInfo
    store (keyed under the LLDP info key).

    Returns a deferred whose result is the cached value, or None when no
    cache entry exists.
    """
    cached = yield run_in_thread(
        manage.NetboxInfo.cache_get,
        self.netbox,
        INFO_KEY_LLDP_INFO,
        INFO_VAR_REMOTES_CACHE,
    )
    defer.returnValue(cached)
def _save_cached_remote_table(self, remote_table):
    """Stores remote_table as the cached copy of the remote neighbor
    table (keyed under the LLDP info key), via blocking db access in a
    worker thread.
    """
    yield run_in_thread(
        manage.NetboxInfo.cache_set,
        self.netbox,
        INFO_KEY_LLDP_INFO,
        INFO_VAR_REMOTES_CACHE,
        remote_table,
    )
def _get_type_from_db(self):
    """Loads from db a type object matching the sysobjectid.

    Returns a deferred whose result is the first matching NetboxType
    shadow, or None when none exists.
    """
    def _first_or_none(rows):
        return rows[0] if rows else None

    # Look up existing type entry
    queryset = manage.NetboxType.objects.filter(
        sysobjectid=self.sysobjectid)
    deferred = db.run_in_thread(
        storage.shadowify_queryset_and_commit, queryset)
    return deferred.addCallback(_first_or_none)
def handle(self):
    """Checks the device's SNMP agent and dispatches up/down events on
    state transitions.

    Raises SuggestedReschedule to retry soon after marking a device down.
    """
    self._logger.debug("snmp version from db: %s", self.netbox.snmp_version)
    previously_down = yield db.run_in_thread(self._currently_down)
    currently_ok = yield self._do_check()

    if currently_ok and previously_down:
        yield self._mark_as_up()
    elif not (currently_ok or previously_down):
        # Newly down: dispatch a down event and ask for a quick re-check
        yield self._mark_as_down()
        raise SuggestedReschedule(delay=60)
def handle(self):
    """Checks the device's SNMP agent and dispatches up/down events.

    Down events are always sent while the agent is unreachable; the
    eventengine is expected to deduplicate them. Raises
    SuggestedReschedule to retry soon while the agent is down.
    """
    self._logger.debug("snmp version from db: %s", self.netbox.snmp_version)
    down_before = yield db.run_in_thread(self._currently_down)
    up_now = yield self._do_check()

    if up_now:
        if down_before:
            yield self._mark_as_up()
    else:
        # Always send down events; eventengine will ignore any duplicates
        yield self._mark_as_down()
        raise SuggestedReschedule(delay=60)
def handle(self):
    """Collects the LLDP remote table, but only when the table's
    timestamps indicate it has changed since the last run.
    """
    mib = LLDPMib(self.agent)
    stampcheck = yield self._stampcheck(mib)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        self.remote = yield mib.get_remote_table()
        if self.remote:
            self._logger.debug("LLDP neighbors:\n %s", pformat(self.remote))
            # Processing involves blocking database access; run in a thread
            yield run_in_thread(self._process_remote)
    # Remember the observed timestamps for the next run
    stampcheck.save()
def handle(self):
    """Collects the CDP cache table, but only when its timestamps
    indicate it has changed since the last run.
    """
    cdp = CiscoCDPMib(self.agent)
    stampcheck = yield self._stampcheck(cdp)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        cache = yield cdp.get_cdp_neighbors()
        if cache:
            self._logger.debug("found CDP cache data: %r", cache)
            self.cache = cache
            # Processing involves blocking database access; run in a thread
            yield run_in_thread(self._process_cache)
    # Remember the observed timestamps for the next run
    stampcheck.save()
def handle(self):
    """Collects the LLDP remote table, skipping collection entirely when
    the table's timestamps are unchanged since the last run.
    """
    mib = LLDPMib(self.agent)
    stampcheck = yield self._stampcheck(mib)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        self.remote = yield mib.get_remote_table()
        if self.remote:
            self._logger.debug("LLDP neighbors:\n %s", pformat(self.remote))
            # _process_remote does blocking work; run it in a worker thread
            yield run_in_thread(self._process_remote)
    # Persist the collected timestamps for comparison on the next run
    stampcheck.save()
def handle(self):
    """Collects bandwidth, cpu, sysuptime and memory statistics and ships
    them off as metrics.

    Devices that have a master are skipped; collection does not run for
    such virtual instances.
    """
    if self.netbox.master:
        defer.returnValue(None)

    boxes = yield db.run_in_thread(self._get_netbox_list)

    collected = []
    for collector in (self._collect_bandwidth, self._collect_cpu,
                      self._collect_sysuptime, self._collect_memory):
        metrics = yield collector(boxes)
        collected.extend(metrics)

    if collected:
        send_metrics(collected)
def _get_type_from_db(self):
    """Loads from db a type object matching the sysobjectid.

    Returns a deferred; its result is the first match, or None when the
    lookup comes up empty.
    """
    # Look up existing type entry
    matching = manage.NetboxType.objects.filter(
        sysobjectid=self.sysobjectid)
    deferred = db.run_in_thread(
        storage.shadowify_queryset_and_commit, matching)

    def _pick_first(result):
        if result:
            return result[0]
        # implicitly returns None when nothing matched

    deferred.addCallback(_pick_first)
    return deferred
def handle(self):
    """Compares each known unit's stored state against its current state,
    handling any transitions found.

    Returns a deferred whose result is always True.
    """
    known_units = yield db.run_in_thread(self._get_database_unit_list)
    observed_states = {}
    for unit in known_units:
        previous = unit.up
        current = yield self._retrieve_current_unit_state(unit)
        observed_states[unit] = current
        if current != previous:
            yield self._handle_state_change(unit, current)
    defer.returnValue(True)
def handle(self):
    """Collects the CDP cache table, skipping collection entirely when
    the table's timestamps are unchanged since the last run.
    """
    cdp = CiscoCDPMib(self.agent)
    stampcheck = yield self._stampcheck(cdp)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        cache = yield cdp.get_cdp_neighbors()
        if cache:
            self._logger.debug("found CDP cache data: %r", cache)
            self.cache = cache
            # _process_cache does blocking work; run it in a worker thread
            yield run_in_thread(self._process_cache)
    # Persist the collected timestamps for comparison on the next run
    stampcheck.save()
def handle(self):
    """Polls this device's known sensors and dispatches the responses as
    metrics.

    Sensor OIDs are queried in batches of at most
    MAX_SENSORS_PER_REQUEST, to keep each SNMP request at a reasonable
    size.
    """
    sensors = yield run_in_thread(self._get_sensors)
    self._logger.debug("retrieving data from %d sensors", len(sensors))
    # list() is required to slice: dict.keys() returns a non-subscriptable
    # view object on Python 3 (it was a list on Python 2)
    oids = list(sensors.keys())
    requests = [
        oids[x:x + MAX_SENSORS_PER_REQUEST]
        for x in range(0, len(oids), MAX_SENSORS_PER_REQUEST)
    ]
    for req in requests:
        data = yield self.agent.get(req).addCallback(
            self._response_to_metrics, sensors)
        self._logger.debug("got data from sensors: %r", data)
def handle(self):
    """Collects counters and sends them off as metrics.

    For a device that has a master, collection is skipped; only instance
    details are logged (at debug level).
    """
    if self.netbox.master:
        yield self._log_instance_details()
        defer.returnValue(None)

    started_at = time.time()
    stats = yield self._get_stats()
    boxes = yield db.run_in_thread(self._get_netbox_list)
    metric_tuples = list(
        self._make_metrics(stats, netboxes=boxes, timestamp=started_at))
    if metric_tuples:
        self._logger.debug("Counters collected")
        send_metrics(metric_tuples)
def _log_instance_details(self):
    """Debug-logs interface details for this virtual instance relative to
    its master device.
    """
    def _get_master_and_instance_list():
        # Runs in a worker thread: synchronous Django ORM access
        netbox = manage.Netbox.objects.get(id=self.netbox.id)

        my_ifcs = netbox.interface_set.values_list('ifname', flat=True)
        masters_ifcs = netbox.master.interface_set.values_list(
            'ifname', flat=True)
        # NOTE(review): this computes masters - mine, i.e. interfaces that
        # exist on the master but NOT on this instance, while the log
        # message below claims the opposite ("do not exist on master").
        # Verify which set is actually intended.
        local_ifcs = set(masters_ifcs) - set(my_ifcs)
        return netbox.master.sysname, local_ifcs

    # Only hit the database if the result will actually be logged
    if self._logger.isEnabledFor(logging.DEBUG):
        master, ifcs = yield db.run_in_thread(_get_master_and_instance_list)
        self._logger.debug("local interfaces (that do not exist on master "
                           "%s): %r", master, ifcs)
def load(self):
    "Loads existing timestamps from db"

    def _unpickle():
        # Runs in a worker thread: synchronous Django ORM access
        try:
            info = manage.NetboxInfo.objects.get(
                netbox__id=self._get_netbox().id,
                key=INFO_KEY_NAME,
                variable=self.var_name)
        except manage.NetboxInfo.DoesNotExist:
            # No timestamps have been stored yet for this netbox/variable
            return None
        try:
            return pickle.loads(str(info.value))
        except Exception:
            # Deliberate best-effort: a corrupt or unreadable pickle is
            # treated the same as missing data
            return None

    self.loaded_times = yield db.run_in_thread(_unpickle)
    defer.returnValue(self.loaded_times)
def create_new_type(self):
    """Creates a new NetboxType (and its Vendor) container from the
    collected sysObjectID, filling the description from the device's
    sysDescr.
    """
    vendor_id = yield db.run_in_thread(get_vendor_id, self.sysobjectid)
    vendor = self.containers.factory(vendor_id, shadows.Vendor)
    vendor.id = vendor_id

    netbox_type = self.containers.factory(
        self.sysobjectid, shadows.NetboxType)
    netbox_type.vendor = vendor
    netbox_type.name = self.sysobjectid
    netbox_type.sysobjectid = self.sysobjectid

    sysdescr = yield self.snmpv2_mib.get_sysDescr()
    self._logger.debug("Creating new type with descr=%r", sysdescr)
    netbox_type.description = sysdescr
def on_netbox_type_changed(self, netbox_id, new_type, **kwargs):
    """Performs various cleanup and reload actions on a netbox type
    change signal.

    The netbox' data are cleaned up, and the next netbox data reload is
    scheduled to take place immediately.
    """
    # Fall back to the raw id when the netbox is unknown or has no sysname
    sysname = (self.netboxes[netbox_id].sysname
               if netbox_id in self.netboxes else None) or str(netbox_id)
    self._logger.info("Cancelling all jobs for %s due to type change.",
                      sysname)
    self.cancel_netbox_scheduler(netbox_id)

    cleanup_df = db.run_in_thread(
        shadows.Netbox.cleanup_replaced_netbox, netbox_id, new_type)
    return cleanup_df.addCallback(
        lambda _result: self._start_netbox_reload_loop())
def on_netbox_type_changed(self, netbox_id, new_type, **_kwargs):
    """Performs various cleanup and reload actions on a netbox type
    change signal.

    The netbox' data are cleaned up, and the next netbox data reload is
    scheduled to take place immediately.
    """
    if netbox_id in self.netboxes:
        # or-fallback: an empty sysname also degrades to the raw id
        sysname = self.netboxes[netbox_id].sysname or str(netbox_id)
    else:
        sysname = str(netbox_id)

    self._logger.info("Cancelling all jobs for %s due to type change.",
                      sysname)
    self.cancel_netbox_scheduler(netbox_id)

    deferred = db.run_in_thread(
        shadows.Netbox.cleanup_replaced_netbox, netbox_id, new_type)
    return deferred.addCallback(
        lambda _ignored: self._start_netbox_reload_loop())
def _create_new_type(self, oid):
    """Creates a new NetboxType from the given sysobjectid.

    The type's description is filled from the device's sysDescr, and the
    new record is saved to the database before being returned (as the
    deferred's result).
    """
    self._logger.debug("Creating a new type from %r", oid)
    description = yield self.snmpv2_mib.get_sysDescr()

    def _create():
        # Runs in a worker thread: synchronous Django ORM access
        vendor = self._get_vendor(oid)
        type_ = manage.NetboxType(
            vendor=vendor,
            name=str(oid),
            # stored without any leading/trailing dots
            sysobjectid=str(oid).strip("."),
            description=description,
        )
        type_.save()
        return type_

    new_type = yield db.run_in_thread(_create)
    defer.returnValue(new_type)
def handle(self):
    """Collects and processes the LLDP remote table, but only when its
    timestamps indicate a change since the last run.
    """
    mib = LLDPMib(self.agent)
    stampcheck = yield self._stampcheck(mib)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        self._logger.debug("collecting LLDP remote table")
        self.remote = yield mib.get_remote_table()
        if self.remote:
            self._logger.debug("LLDP neighbors:\n %s", pformat(self.remote))
            # _process_remote does blocking work; run it in a worker thread
            yield run_in_thread(self._process_remote)
            # Store sentinel to signal that LLDP neighbors have been processed
            shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)
    else:
        self._logger.debug("LLDP remote table seems unchanged")
    stampcheck.save()
def _load_existing_mappings(self):
    """Load the existing ARP records for this box from the db.

    Returns:
      A deferred whose result is a dictionary: { (ip, mac): arpid }
    """
    self._logger.debug("Loading open arp records from database")
    # Open records are those whose end_time is still set to datetime.max
    queryset = manage.Arp.objects.filter(
        netbox__id=self.netbox.id,
        end_time__gte=datetime.max,
    ).values('id', 'ip', 'mac')
    records = yield db.run_in_thread(
        storage.shadowify_queryset_and_commit, queryset)
    self._logger.debug("Loaded %d open records from arp", len(records))

    mappings = {}
    for record in records:
        mappings[(IP(record['ip']), record['mac'])] = record['id']
    defer.returnValue(mappings)
def handle(self):
    """Collects and processes the CDP cache table, but only when its
    timestamps indicate a change since the last run.
    """
    cdp = CiscoCDPMib(self.agent)
    stampcheck = yield self._stampcheck(cdp)
    need_to_collect = yield stampcheck.is_changed()
    if need_to_collect:
        self._logger.debug("collecting CDP cache table")
        cache = yield cdp.get_cdp_neighbors()
        if cache:
            self._logger.debug("found CDP cache data: %r", cache)
            self.cache = cache
            # _process_cache does blocking work; run it in a worker thread
            yield run_in_thread(self._process_cache)
            # Store sentinel to signal that CDP neighbors have been processed
            shadows.AdjacencyCandidate.sentinel(self.containers, SOURCE)
    else:
        self._logger.debug("CDP cache table seems unchanged")
    stampcheck.save()
def _load_existing_mappings(self):
    """Load the existing ARP records for this box from the db.

    Returns:
      A deferred whose result is a dictionary: { (ip, mac): arpid }
    """
    self._logger.debug("Loading open arp records from database")
    open_queryset = manage.Arp.objects.filter(
        netbox__id=self.netbox.id,
        end_time__gte=datetime.max).values('id', 'ip', 'mac')
    rows = yield db.run_in_thread(
        storage.shadowify_queryset_and_commit, open_queryset)
    self._logger.debug("Loaded %d open records from arp", len(rows))

    # Key each open record id by its (ip, mac) pair
    id_by_pair = {(IP(row['ip']), row['mac']): row['id'] for row in rows}
    defer.returnValue(id_by_pair)
def _log_job_externally(self, success=True):
    """Logs a job to the database"""
    duration = self.get_current_runtime()
    # Flatten the timedelta into a float number of seconds
    duration_in_seconds = (
        duration.days * 86400
        + duration.seconds
        + duration.microseconds / 1e6
    )
    timestamp = time.time()

    def _create_record(timestamp):
        # Runs in a worker thread: synchronous Django ORM access
        netbox = manage.Netbox.objects.get(id=self.netbox.id)
        if netbox.deleted_at:
            # Skip logging for devices queued for deletion
            _logger.info(
                "Not logging job to db; delete of this IP device"
                " was requested at %s",
                netbox.deleted_at,
            )
            return
        log = manage.IpdevpollJobLog(
            netbox_id=self.netbox.id,
            job_name=self.name,
            end_time=datetime.datetime.fromtimestamp(timestamp),
            duration=duration_in_seconds,
            success=success,
            interval=self.interval,
        )
        log.save()

    def _log_to_graphite():
        prefix = metric_prefix_for_ipdevpoll_job(self.netbox.sysname,
                                                 self.name)
        runtime_path = prefix + ".runtime"
        runtime = (runtime_path, (timestamp, duration_in_seconds))
        send_metrics([runtime])

    _log_to_graphite()
    try:
        yield db.run_in_thread(_create_record, timestamp)
    except db.ResetDBConnectionError:
        pass  # this is being logged all over the place at the moment
    except Exception as error:
        # Best-effort: a failed job-log write should not fail the job
        _logger.warning("failed to log job to database: %s", error)
def _save_container(self):
    """
    Parses the container and finds a sane storage order. We do this
    so we get ForeignKeys stored before the objects that are using
    them are stored.
    """

    @db.cleanup_django_debug_after
    def complete_save_cycle():
        # Runs synchronously in a worker thread.
        # Traverse all the classes in the container repository and
        # generate the storage queue
        self._populate_storage_queue()
        # Prepare all shadow objects for storage.
        self._prepare_containers_for_save()
        # Actually save to the database
        result = self._perform_save()
        self._log_timed_result(result, "Storing to database complete")
        # Do cleanup for the known container classes.
        self._cleanup_containers_after_save()

    # Caller gets a deferred that fires when the save cycle completes
    df = db.run_in_thread(complete_save_cycle)
    return df
def save_container(self):
    """
    Parses the container and finds a sane storage order. We do this
    so we get ForeignKeys stored before the objects that are using
    them are stored.
    """

    @db.autocommit
    @db.cleanup_django_debug_after
    def complete_save_cycle():
        # Runs synchronously in a worker thread, in autocommit mode.
        # Traverse all the classes in the container repository and
        # generate the storage queue
        self.populate_storage_queue()
        # Prepare all shadow objects for storage.
        self.prepare_containers_for_save()
        # Actually save to the database
        result = self.perform_save()
        self.log_timed_result(result, "Storing to database complete")
        # Do cleanup for the known container classes.
        self.cleanup_containers_after_save()

    # Caller gets a deferred that fires when the save cycle completes
    df = db.run_in_thread(complete_save_cycle)
    return df
def handle(self):
    """Gets forwarding tables from Q-BRIDGE-MIB. If that fails, reverts
    to BRIDGE-MIB, with optional community string indexing on Cisco.
    """
    forwarding = yield self._get_dot1q_mac_port_mapping()
    self._log_fdb_stats("Q-BRIDGE-MIB", forwarding)
    if not forwarding:
        # Q-BRIDGE-MIB gave us nothing; fall back to plain BRIDGE-MIB
        forwarding = yield self._get_dot1d_mac_port_mapping()
        self._log_fdb_stats("BRIDGE-MIB", forwarding)
    self.fdb = forwarding

    self.monitored = yield db.run_in_thread(get_netbox_macs)
    # MAC addresses that belong to this very device
    self.my_macs = set(
        mac for mac, netboxid in self.monitored.items()
        if netboxid == self.netbox.id
    )

    self._classify_ports()
    self._store_cam_records()
    self._store_adjacency_candidates()
    self.blocking = yield self._get_dot1d_stp_blocking()
def log_job_externally(job_handler, success=True, interval=None):
    """Logs a job to the database and to Graphite.

    :param job_handler: The job handler whose run is being logged; its
                        netbox, name and runtime are recorded.
    :param success: Whether the job run was successful.
    :param interval: The job's scheduling interval, stored with the log
                     record.
    """
    duration = job_handler.get_current_runtime()
    # Flatten the timedelta into a float number of seconds
    duration_in_seconds = (duration.days * 86400 + duration.seconds +
                           duration.microseconds / 1e6)
    timestamp = time.time()

    @db.autocommit
    def _create_record(timestamp):
        # Runs in a worker thread: synchronous Django ORM access
        log = manage.IpdevpollJobLog(
            netbox_id=job_handler.netbox.id,
            job_name=job_handler.name,
            end_time=datetime.datetime.fromtimestamp(timestamp),
            duration=duration_in_seconds,
            success=success,
            interval=interval
        )
        log.save()

    def _log_to_graphite():
        prefix = metric_prefix_for_ipdevpoll_job(job_handler.netbox.sysname,
                                                 job_handler.name)
        runtime_path = prefix + ".runtime"
        runtime = (runtime_path, (timestamp, duration_in_seconds))
        send_metrics([runtime])

        counter_path = (
            prefix + (".success-count" if success else ".failure-count"))
        _COUNTERS.increment(counter_path)
        _COUNTERS.start()

    _log_to_graphite()
    try:
        yield db.run_in_thread(_create_record, timestamp)
    # Fixed: "except Exception, error" is Python-2-only syntax (a
    # SyntaxError under Python 3); the "as" form works on 2.6+ and 3.x
    except Exception as error:
        # Best-effort: a failed job-log write should not fail the job
        _logger.warning("failed to log job to database: %s", error)
def _update_prefix_cache(cls):
    """Refreshes the class-wide prefix cache from the database.

    Returns a deferred that fires once the reloaded prefixes have been
    fed into the cache.
    """
    cls.prefix_cache_update_time = datetime.now()
    # addCallback returns the same deferred, so this chains and returns
    # in one expression
    return db.run_in_thread(
        cls._load_prefixes_synchronously
    ).addCallback(cls._update_prefix_cache_with_result)
def load_all(self):
    """Asynchronously load netboxes from database.

    Returns a deferred firing with the result of load_all_s, which runs
    in a worker thread.
    """
    return run_in_thread(self.load_all_s)
def _mark_as_up(self):
    """Logs and dispatches an SNMP-agent-up event for this netbox."""
    self._logger.warning("SNMP agent up again on %s", self.netbox.sysname)
    # Event dispatch touches the database; run it in a worker thread
    yield db.run_in_thread(self._dispatch_up_event)
def _mark_as_down(self):
    """Logs and dispatches an SNMP-agent-down event for this netbox."""
    self._logger.warning("SNMP agent down on %s", self.netbox.sysname)
    # Event dispatch touches the database; run it in a worker thread
    yield db.run_in_thread(self._dispatch_down_event)
def _mark_as_down(self):
    """Dispatches an SNMP-agent-down event, unless this netbox is
    already registered in the shared down-set.
    """
    if self.netbox.id in self.down_set:
        # Already marked down; nothing to do
        return
    self._logger.warning("SNMP agent down on %s", self.netbox.sysname)
    self.down_set.add(self.netbox.id)
    yield db.run_in_thread(self._dispatch_down_event)
def _handle_state_change(self, unit, new_state):
    """Records a unit's state transition and posts the corresponding
    event, both via blocking db work in worker threads.
    """
    self._logger.info("%s state changed from %s to %s",
                      unit.name, unit.up, new_state)
    for task in (self._update_internal_state, self._post_event):
        yield db.run_in_thread(task, unit, new_state)
def can_handle(cls, netbox):
    """Returns a deferred whose result says whether this plugin can run
    for netbox: the parent class must accept it, and the device must
    have interfaces.
    """
    parent_ok = super(CDP, cls).can_handle(netbox)
    # The interface check does blocking db access; run it in a thread
    ifcs_present = yield run_in_thread(cls._has_interfaces, netbox)
    defer.returnValue(ifcs_present and parent_ok)
def _mark_as_up(self):
    """Dispatches an SNMP-agent-up event, but only when this netbox was
    previously registered in the shared down-set.
    """
    if self.netbox.id not in self.down_set:
        # Was never marked down; nothing to do
        return
    self._logger.warning("SNMP agent up again on %s",
                         self.netbox.sysname)
    self.down_set.remove(self.netbox.id)
    yield db.run_in_thread(self._dispatch_up_event)
def can_handle(cls, netbox):
    """Deferred-returning capability check: the parent class must accept
    this netbox AND the device must have interfaces registered.
    """
    base_verdict = super(CDP, cls).can_handle(netbox)
    interfaces_found = yield run_in_thread(cls._has_interfaces, netbox)
    # Preserve operand order so the same object is propagated as result
    defer.returnValue(interfaces_found and base_verdict)