def test_config_file_not_found(self):
    """load_profiles() must log (not raise) when the runtime config file is missing."""
    with self.assertLogs(
        "splunk_connect_for_snmp.common.profiles", level="INFO"
    ) as cm:
        load_profiles()
    # Generator instead of a materialized list comprehension (ruff C419):
    # any() short-circuits on the first matching log record.
    self.assertTrue(
        any(
            "runtime_config_that_doesnt_exist.yaml not found" in el
            for el in cm.output
        )
    )
def inventory_setup_poller(self, work):
    """(Re)build the periodic poll tasks for one inventory address.

    Looks up the inventory record and target state in MongoDB, assigns the
    matching profiles, schedules one poll task per polling period, and then
    removes schedules that are no longer needed.
    """
    address = work["address"]

    # Refresh the cached profile definitions once they are older than the
    # configured reload delay.
    now = time.time()
    if now - self.last_modified > PROFILES_RELOAD_DELAY:
        self.profiles = load_profiles()
        self.last_modified = now
        logger.debug("Profiles reloaded")

    task_manager = customtaskmanager.CustomPeriodicTaskManager()

    mongo_client = pymongo.MongoClient(MONGO_URI)
    mongo_db = mongo_client[MONGO_DB]
    inventory_collection = mongo_db.inventory
    targets_collection = mongo_db.targets

    inventory_record = get_inventory(inventory_collection, address)
    target_state = targets_collection.find_one(
        {"address": address},
        {"target": True, "state": True, "config": True},
    )
    assigned_profiles = assign_profiles(
        inventory_record, self.profiles, target_state
    )

    # One poll task per distinct polling period; remember which schedules
    # are still in use so stale ones can be dropped afterwards.
    active_schedules: list[str] = []
    for period in assigned_profiles:
        task_manager.manage_task(
            **generate_poll_task_definition(
                active_schedules, address, assigned_profiles, period
            )
        )

    task_manager.delete_unused_poll_tasks(f"{address}", active_schedules)
    task_manager.delete_disabled_poll_tasks()
def __init__(self, **kwargs):
    """Initialize the poller: HTTP session (Mongo-cached unless ``no_mongo``),
    profile cache, SNMP engine / MIB compiler, and the MIB index map.

    Keyword Args:
        no_mongo: if truthy, use a plain ``Session`` instead of the
            rate-limited, Mongo-backed cached session.
    """
    self.standard_mibs = []
    self.mongo_client = pymongo.MongoClient(MONGO_URI)

    if kwargs.get("no_mongo"):
        self.session = Session()
    else:
        # Rate-limited HTTP session whose response cache lives in MongoDB.
        self.session = CachedLimiterSession(
            per_second=120,
            cache_name="cache_http",
            backend=MongoCache(connection=self.mongo_client, db_name=MONGO_DB),
            expire_after=1800,
            match_headers=False,
            stale_if_error=True,
            allowable_codes=[200],
        )

    self.profiles = load_profiles()
    self.last_modified = time.time()
    self.snmpEngine = SnmpEngine()
    self.builder = self.snmpEngine.getMibBuilder()
    self.mib_view_controller = view.MibViewController(self.builder)
    compiler.addMibCompiler(self.builder, sources=[MIB_SOURCES])

    # NOTE: downloading the standard-MIB list from MIB_STANDARD is disabled;
    # fall back to the built-in default set.
    for mib in DEFAULT_STANDARD_MIBS:
        self.standard_mibs.append(mib)
        self.builder.loadModules(mib)

    mib_response = self.session.get(f"{MIB_INDEX}")
    self.mib_map = {}
    if mib_response.status_code == 200:
        with StringIO(mib_response.text) as index_csv:
            reader = csv.reader(index_csv)
            for each_row in reader:
                if len(each_row) == 2:
                    self.mib_map[each_row[1]] = each_row[0]
        logger.debug(f"Loaded {len(self.mib_map.keys())} mib map entries")
    else:
        # BUG FIX: was `self.mib_response.status_code`, but `mib_response`
        # is a local variable — the old code raised AttributeError instead
        # of logging the HTTP error.
        logger.error(
            f"Unable to load mib map from index http error {mib_response.status_code}"
        )
def test_disabled_profiles(self):
    """Disabled profiles are filtered out; only the enabled profile survives."""
    expected_profiles = {
        "EnirchIF": {
            "frequency": 200,
            "condition": {"type": "base"},
            "varBinds": [
                ["IF-MIB", "ifDescr"],
                ["IF-MIB", "ifAdminStatus"],
                ["IF-MIB", "ifName"],
            ],
        }
    }
    self.assertEqual(load_profiles(), expected_profiles)
def test_all_profiles(self):
    """Base and runtime profiles are merged into a single profile dict."""
    expected_profiles = {
        "BaseUpTime": {
            "frequency": 300,
            "condition": {"type": "base"},
            "varBinds": [
                ["IF-MIB", "ifName"],
                ["IF-MIB", "ifAlias"],
                ["SNMPv2-MIB", "sysUpTime", 0],
            ],
        },
        "EnirchIF": {
            "frequency": 600,
            "condition": {"type": "base"},
            "varBinds": [
                ["IF-MIB", "ifDescr"],
                ["IF-MIB", "ifAdminStatus"],
                ["IF-MIB", "ifName"],
                ["IF-MIB", "ifAlias"],
            ],
        },
        "test_2": {
            "frequency": 120,
            "varBinds": [
                ["IF-MIB", "ifInDiscards", 1],
                ["IF-MIB", "ifOutErrors"],
                ["SNMPv2-MIB", "sysDescr", 0],
            ],
        },
        "new_profiles": {"frequency": 6, "varBinds": [["IP-MIB"]]},
        "generic_switch": {
            "frequency": 5,
            "varBinds": [
                ["SNMPv2-MIB", "sysDescr"],
                ["SNMPv2-MIB", "sysName", 0],
                ["IF-MIB"],
                ["TCP-MIB"],
                ["UDP-MIB"],
            ],
        },
    }
    self.assertEqual(load_profiles(), expected_profiles)
def test_read_base_profiles(self):
    """Only the bundled base profiles are returned when no runtime config exists."""
    expected_profiles = {
        "BaseUpTime": {
            "frequency": 300,
            "condition": {"type": "base"},
            "varBinds": [
                ["IF-MIB", "ifName"],
                ["IF-MIB", "ifAlias"],
                ["SNMPv2-MIB", "sysUpTime", 0],
            ],
        },
        "EnirchIF": {
            "frequency": 600,
            "condition": {"type": "base"},
            "varBinds": [
                ["IF-MIB", "ifDescr"],
                ["IF-MIB", "ifAdminStatus"],
                ["IF-MIB", "ifName"],
                ["IF-MIB", "ifAlias"],
            ],
        },
    }
    self.assertEqual(load_profiles(), expected_profiles)
def test_runtime_profiles(self):
    """Runtime-config profiles are loaded as-is when no base profiles apply."""
    expected_profiles = {
        "test_2": {
            "frequency": 120,
            "varBinds": [
                ["IF-MIB", "ifInDiscards", 1],
                ["IF-MIB", "ifOutErrors"],
                ["SNMPv2-MIB", "sysDescr", 0],
            ],
        },
        "new_profiles": {"frequency": 6, "varBinds": [["IP-MIB"]]},
        "generic_switch": {
            "frequency": 5,
            "varBinds": [
                ["SNMPv2-MIB", "sysDescr"],
                ["SNMPv2-MIB", "sysName", 0],
                ["IF-MIB"],
                ["TCP-MIB"],
                ["UDP-MIB"],
            ],
        },
    }
    self.assertEqual(load_profiles(), expected_profiles)
def do_work(
    self,
    ir: InventoryRecord,
    walk: bool = False,
    profiles: List[str] = None,
    walked_first_time=True,
):
    """Poll (or walk) one SNMP target and collect its metrics.

    Args:
        ir: inventory record describing the target (address, port, auth).
        walk: True for a full MIB walk, False for a profile-driven poll.
        profiles: optional list of profile names restricting the varbinds.
        walked_first_time: whether the initial walk has already completed.

    Returns:
        Tuple ``(retry, metrics)`` — ``retry`` is True when a re-poll is
        needed (e.g. after loading new MIB modules), ``metrics`` maps group
        keys to collected metric data.
    """
    retry = False
    address = transform_address_to_key(ir.address, ir.port)

    # Refresh cached profile definitions when stale.
    if time.time() - self.last_modified > PROFILES_RELOAD_DELAY:
        self.profiles = load_profiles()
        self.last_modified = time.time()
        logger.debug("Profiles reloaded")

    varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = self.get_var_binds(
        address, walk=walk, profiles=profiles, walked_first_time=walked_first_time
    )

    authData = GetAuth(logger, ir, self.snmpEngine)
    contextData = get_context_data()
    transport = UdpTransportTarget(
        (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT
    )

    metrics: Dict[str, Any] = {}
    if not varbinds_get and not varbinds_bulk:
        logger.info(f"No work to do for {address}")
        return False, {}

    if varbinds_bulk:
        for (
            errorIndication,
            errorStatus,
            errorIndex,
            varBindTable,
        ) in bulkCmd(
            self.snmpEngine,
            authData,
            transport,
            contextData,
            1,
            10,
            *varbinds_bulk,
            lexicographicMode=False,
            ignoreNonIncreasingOid=is_increasing_oids_ignored(ir.address, ir.port),
        ):
            if not _any_failure_happened(
                errorIndication,
                errorStatus,
                errorIndex,
                varBindTable,
                ir.address,
                walk,
            ):
                tmp_retry, tmp_mibs, _ = self.process_snmp_data(
                    varBindTable, metrics, address, bulk_mapping
                )
                if tmp_mibs:
                    # Load the newly required MIB modules, then re-process
                    # the same rows so previously unresolved varbinds are
                    # translated. BUG FIX: the re-processing call ran
                    # unconditionally, double-counting every row even when
                    # no MIBs were missing.
                    self.load_mibs(tmp_mibs)
                    self.process_snmp_data(
                        varBindTable, metrics, address, bulk_mapping
                    )
                if tmp_retry:
                    # BUG FIX: tmp_retry was computed but never folded into
                    # `retry`, so callers could never schedule a retry.
                    retry = True

    if varbinds_get:
        for (
            errorIndication,
            errorStatus,
            errorIndex,
            varBindTable,
        ) in getCmd(
            self.snmpEngine, authData, transport, contextData, *varbinds_get
        ):
            if not _any_failure_happened(
                errorIndication,
                errorStatus,
                errorIndex,
                varBindTable,
                ir.address,
                walk,
            ):
                self.process_snmp_data(varBindTable, metrics, address, get_mapping)

    # Flatten each group's profile set into a comma-separated string.
    for metric in metrics.values():
        if "profiles" in metric:
            metric["profiles"] = ",".join(metric["profiles"])

    return retry, metrics
def __init__(self):
    """Cache the profile definitions and record the time of the load."""
    loaded_profiles = load_profiles()
    self.profiles = loaded_profiles
    self.last_modified = time.time()
def load():
    """Synchronise the inventory CSV with MongoDB and (re)schedule walk tasks.

    Reads INVENTORY_PATH, upserting/deleting inventory records in Mongo and
    managing the corresponding periodic walk tasks.

    Returns:
        True if at least one record failed to load, False otherwise.
    """
    path = INVENTORY_PATH
    inventory_errors = False
    mongo_client = pymongo.MongoClient(MONGO_URI)
    targets_collection = mongo_client.sc4snmp.targets
    attributes_collection = mongo_client.sc4snmp.attributes
    mongo_db = mongo_client[MONGO_DB]
    inventory_records = mongo_db.inventory

    periodic_obj = customtaskmanager.CustomPeriodicTaskManager()

    migrate_database(mongo_client, periodic_obj)
    config_profiles = load_profiles()

    logger.info(f"Loading inventory from {path}")
    with open(path, encoding="utf-8") as csv_file:
        # DictReader trusts the header row of the CSV for field names.
        ir_reader = DictReader(csv_file)
        for source_record in ir_reader:
            address = source_record["address"]
            if address.startswith("#"):
                logger.warning(f"Record: {address} is commented out. Skipping...")
                continue
            # BUG FIX: `target` must exist before the try block — if
            # InventoryRecord() raises, the except handler used to hit a
            # NameError instead of logging the real failure.
            target = None
            try:
                ir = InventoryRecord(**source_record)
                target = transform_address_to_key(ir.address, ir.port)
                if ir.delete:
                    periodic_obj.disable_tasks(target)
                    inventory_records.delete_one(
                        {"address": ir.address, "port": ir.port}
                    )
                    # BUG FIX: Collection.remove() was deprecated in pymongo 3
                    # and removed in pymongo 4; delete_many() is the supported
                    # equivalent.
                    targets_collection.delete_many({"address": target})
                    attributes_collection.delete_many({"address": target})
                    logger.info(f"Deleting record: {target}")
                else:
                    status = inventory_records.update_one(
                        {"address": ir.address, "port": ir.port},
                        {"$set": ir.asdict()},
                        upsert=True,
                    )
                    profiles = source_record["profiles"].split(";")
                    profile = None
                    if profiles:
                        # Only walk-type profiles may drive the walk task;
                        # the last one listed wins.
                        profiles = [
                            p
                            for p in profiles
                            if config_profiles.get(p, {})
                            .get("condition", {})
                            .get("type")
                            == "walk"
                        ]
                        if profiles:
                            profile = profiles[-1]
                    ir.walk_interval = source_record["walk_interval"]
                    if status.matched_count == 0:
                        logger.info(f"New Record {ir} {status.upserted_id}")
                    elif status.modified_count == 1 and status.upserted_id is None:
                        logger.info(f"Modified Record {ir}")
                    else:
                        # Unchanged record: keep the existing walk task as-is.
                        logger.debug(f"Unchanged Record {ir}")
                        continue
                    task_config = gen_walk_task(ir, profile)
                    periodic_obj.manage_task(**task_config)
            except Exception as e:
                inventory_errors = True
                logger.exception(f"Exception raised for {target or address}: {e}")
    return inventory_errors