class Scanner(Fetcher):
    """Base class for environment scanners.

    Walks the scan-types metadata (loaded via ScanMetadataParser),
    fetches objects from the environment with each type's Fetcher,
    saves them to the inventory, and queues child types for a
    breadth-first follow-up scan (see scan_from_queue()).
    """

    config = None
    environment = None
    env = None
    root_patern = None

    # class-level scan queue shared by all Scanner instances:
    # children are scanned breadth-first rather than depth-first
    scan_queue = queue.Queue()
    # tracks "type;id" keys already queued, to avoid duplicates
    scan_queue_track = {}

    # keep errors indication per environment
    found_errors = {}

    def __init__(self):
        """Scanner is the base class for scanners."""
        super().__init__()
        self.config = Configuration()
        self.inv = InventoryMgr()
        self.scanners_package = None
        self.scanners = {}
        self.link_finders = []
        self.load_scanners_metadata()
        self.load_link_finders_metadata()

    def scan(self, scanner_type, obj, id_field="id",
             limit_to_child_id=None, limit_to_child_type=None):
        """Scan all child types of the given scanner type under obj.

        :param scanner_type: name of the scanner (key into self.scanners)
        :param obj: parent object being scanned
        :param id_field: attribute of obj used as the parent id
        :param limit_to_child_id: if set, only keep the child with this id
        :param limit_to_child_type: type name (or list of names) to restrict
            the scan to
        :return: the matched child when limit_to_child_id is set,
            obj on a normal scan, or False on a ValueError during scanning
        """
        types_to_fetch = self.get_scanner(scanner_type)
        types_children = []
        if not limit_to_child_type:
            limit_to_child_type = []
        elif isinstance(limit_to_child_type, str):
            limit_to_child_type = [limit_to_child_type]
        try:
            for t in types_to_fetch:
                if limit_to_child_type and \
                        t["type"] not in limit_to_child_type:
                    continue
                children = self.scan_type(t, obj, id_field)
                if limit_to_child_id:
                    children = [c for c in children
                                if c[id_field] == limit_to_child_id]
                if not children:
                    continue
                types_children.append({"type": t["type"],
                                       "children": children})
        except ValueError:
            return False
        except SshError:
            # mark the error
            self.found_errors[self.get_env()] = True
        if limit_to_child_id and len(types_children) > 0:
            t = types_children[0]
            children = t["children"]
            return children[0]
        return obj

    def check_type_env(self, type_to_fetch):
        """Check whether this type should be scanned in this environment.

        Compares the type's "environment_condition" (defaulting to an
        OpenStack environment_type condition) against the environment
        configuration. Returns True when the type should be scanned.
        """
        # check if type is to be run in this environment
        basic_cond = {'environment_type': self.ENV_TYPE_OPENSTACK}
        env_cond = type_to_fetch.get("environment_condition", {}) \
            if "environment_condition" in type_to_fetch \
            else basic_cond
        if not env_cond:
            env_cond = basic_cond
        # BUGFIX: validate the condition's type *before* calling dict
        # methods on it; originally this check came after .keys()/.update(),
        # so a non-dict condition raised AttributeError instead of the
        # intended warning
        if not isinstance(env_cond, dict):
            self.log.warn('Illegal environment_condition given '
                          'for type {type}'.format(
                              type=type_to_fetch['type']))
            return True
        if 'environment_type' not in env_cond.keys():
            env_cond.update(basic_cond)
        conf = self.config.get_env_config()
        if 'environment_type' not in conf:
            conf.update(basic_cond)
        for attr, required_val in env_cond.items():
            if attr == "mechanism_drivers":
                if "mechanism_drivers" not in conf:
                    self.log.warn('Illegal environment configuration: '
                                  'missing mechanism_drivers')
                    return False
                if not isinstance(required_val, list):
                    required_val = [required_val]
                # any overlap between required and configured drivers is ok
                value_ok = bool(set(required_val) &
                                set(conf["mechanism_drivers"]))
                if not value_ok:
                    return False
            elif attr not in conf:
                return False
            else:
                if isinstance(required_val, list):
                    if conf[attr] not in required_val:
                        return False
                else:
                    if conf[attr] != required_val:
                        return False
        # no check failed
        return True

    def scan_type(self, type_to_fetch, parent, id_field):
        """Fetch and save all objects of one type under the given parent.

        :param type_to_fetch: metadata dict describing the type (fetcher,
            children_scanner, etc.)
        :param parent: parent object, or a falsy value for a root scan
        :param id_field: attribute of parent used as the fetch id
        :return: list of saved child objects (possibly empty)
        :raises ValueError: if parent is missing the id_field attribute
        :raises ScanError: on unexpected fetcher errors
        """
        # check if type is to be run in this environment
        if not self.check_type_env(type_to_fetch):
            return []

        if not parent:
            obj_id = None
        else:
            obj_id = str(parent[id_field])
            if not obj_id or not obj_id.rstrip():
                raise ValueError("Object missing " + id_field +
                                 " attribute")

        # get Fetcher instance
        fetcher = type_to_fetch["fetcher"]
        if not isinstance(fetcher, Fetcher):
            type_to_fetch['fetcher'] = fetcher()  # make it an instance
            fetcher = type_to_fetch["fetcher"]
        fetcher.setup(env=self.get_env(), origin=self.origin)

        # get children_scanner instance
        children_scanner = type_to_fetch.get("children_scanner")

        escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
        self.log.info("Scanning: type={type}, "
                      "parent: (type={parent_type}, "
                      "name={parent_name}, "
                      "id={parent_id})".format(
                          type=type_to_fetch["type"],
                          parent_type=parent.get('type', 'environment'),
                          parent_name=parent.get('name', ''),
                          parent_id=escaped_id))

        # fetch OpenStack data from environment by CLI, API or MySQL
        # or physical devices data from ACI API
        # It depends on the Fetcher's config.
        try:
            db_results = fetcher.get(escaped_id)
        except SshError:
            self.found_errors[self.get_env()] = True
            return []
        except Exception as e:
            self.log.error(
                "Error while scanning: fetcher={fetcher}, type={type}, "
                "parent: (type={parent_type}, name={parent_name}, "
                "id={parent_id}), "
                "error: {error}".format(
                    fetcher=fetcher.__class__.__name__,
                    type=type_to_fetch["type"],
                    parent_type="environment" if "type" not in parent
                    else parent["type"],
                    parent_name="" if "name" not in parent
                    else parent["name"],
                    parent_id=escaped_id,
                    error=e))
            traceback.print_exc()
            raise ScanError(str(e))

        # format results
        if isinstance(db_results, dict):
            results = db_results["rows"] if db_results["rows"] \
                else [db_results]
        elif isinstance(db_results, str):
            results = json.loads(db_results)
        else:
            results = db_results

        # get child_id_field (idiom: dict.get instead of try/except)
        child_id_field = type_to_fetch.get("object_id_to_use_in_child",
                                           "id")

        environment = self.get_env()
        children = []
        for o in results:
            saved = self.inv.save_inventory_object(
                o, parent=parent, environment=environment,
                type_to_fetch=type_to_fetch)
            if saved:
                # add objects into children list.
                children.append(o)

                # put children scanner into queue
                if children_scanner:
                    self.queue_for_scan(o, child_id_field,
                                        children_scanner)
        return children

    # scanning queued items, rather than going depth-first (DFS)
    # this is done to allow collecting all required data for objects
    # before continuing to next level
    # for example, get host ID from API os-hypervisors call, so later
    # we can use this ID in the "os-hypervisors/<ID>/servers" call
    @staticmethod
    def queue_for_scan(o, child_id_field, children_scanner):
        """Queue a child object for a later breadth-first scan."""
        # BUGFIX: membership was checked with o["id"] alone while entries
        # were stored under "type;id", so duplicates were never detected;
        # use the same composite key for both check and store
        track_key = o["type"] + ";" + o["id"]
        if track_key in Scanner.scan_queue_track:
            return
        Scanner.scan_queue_track[track_key] = 1
        Scanner.scan_queue.put({"object": o,
                                "child_id_field": child_id_field,
                                "scanner": children_scanner})

    def run_scan(self, scanner_type, obj, id_field, child_id, child_type):
        """Run a full scan, then drain the queued children scans."""
        results = self.scan(scanner_type, obj, id_field, child_id,
                            child_type)

        # run children scanner from queue.
        self.scan_from_queue()
        return results

    def scan_from_queue(self):
        """Drain the class-level scan queue, scanning each queued item."""
        while not Scanner.scan_queue.empty():
            item = Scanner.scan_queue.get()
            scanner_type = item["scanner"]

            # scan the queued item
            self.scan(scanner_type, item["object"],
                      item["child_id_field"])
        self.log.info("Scan complete")

    def scan_links(self):
        """Run all configured link finders over the inventory."""
        self.log.info("Scanning for links")
        for fetcher in self.link_finders:
            fetcher.setup(env=self.get_env(), origin=self.origin)
            fetcher.add_links()

    def scan_cliques(self):
        """Find cliques (connected object groups) in this environment."""
        clique_scanner = CliqueFinder()
        clique_scanner.setup(env=self.get_env(), origin=self.origin)
        clique_scanner.find_cliques()

    def deploy_monitoring_setup(self):
        """Apply pending monitoring setup changes; mark env on failure."""
        ret = self.inv.monitoring_setup_manager \
            .handle_pending_setup_changes()
        if not ret:
            self.found_errors[self.get_env()] = True

    def get_run_app_path(self):
        """Return the app run path from config (default /etc/calipso)."""
        conf = self.config.get_env_config()
        run_app_path = conf.get('run_app_path', '')
        if not run_app_path:
            run_app_path = conf.get('app_path', '/etc/calipso')
        return run_app_path

    def load_scanners_metadata(self):
        """Parse the scanners metadata file into self.scanners."""
        parser = ScanMetadataParser(self.inv)
        scanners_file = os.path.join(self.get_run_app_path(),
                                     'config',
                                     ScanMetadataParser.SCANNERS_FILE)
        metadata = parser.parse_metadata_file(scanners_file)
        self.scanners_package = metadata[ScanMetadataParser.SCANNERS_PACKAGE]
        self.scanners = metadata[ScanMetadataParser.SCANNERS]

    def load_link_finders_metadata(self):
        """Parse the link-finders metadata file into self.link_finders."""
        parser = FindLinksMetadataParser()
        finders_file = os.path.join(self.get_run_app_path(),
                                    'config',
                                    FindLinksMetadataParser.FINDERS_FILE)
        metadata = parser.parse_metadata_file(finders_file)
        self.link_finders = metadata[FindLinksMetadataParser.LINK_FINDERS]

    def get_scanner_package(self):
        return self.scanners_package

    # NOTE(review): scan() iterates the returned value as a list of type
    # dicts; the -> dict annotation looks inaccurate — confirm against
    # ScanMetadataParser before changing it
    def get_scanner(self, scanner_type: str) -> dict:
        return self.scanners.get(scanner_type)
class AciFetchLeafToSpinePnics(AciAccess):
    """Fetch leaf-to-spine switch pnic connectivity data from the ACI API."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def fetch_switches_by_role(self, role_name):
        """Return attributes of all fabric nodes with the given role."""
        query_filter = {"query-target-filter":
                        "eq(fabricNode.role, \"{}\")".format(role_name)}
        switches = self.fetch_objects_by_class("fabricNode", query_filter)
        return [switch["attributes"] for switch in switches]

    def fetch_adjacent_connections(self, device_id):
        """Return LLDP adjacency entries for the given device."""
        dn = "/".join((device_id, "sys"))
        response = self.fetch_mo_data(dn,
                                     {"query-target": "subtree",
                                      "target-subtree-class": "lldpAdjEp"})
        connections = self.get_objects_by_field_names(response,
                                                      "lldpAdjEp",
                                                      "attributes")
        return connections

    # Returns:
    # List of:
    # 1. Switches with role "spine"
    # 2. Downlink pnic id for spine switch
    # 3. Uplink pnic id for leaf switch
    def fetch_spines_and_pnics_by_leaf_id(self, leaf_id):
        spine_switches = self.fetch_switches_by_role("spine")
        adjacent_devices = self.fetch_adjacent_connections(leaf_id)

        spines = []
        for spine in spine_switches:
            # Check if spine switch is connected to current leaf switch
            connection = next((d for d in adjacent_devices
                               if spine["name"] == d["sysName"]), None)
            if connection:
                try:
                    # Extract pnics from adjacency data
                    # (raw strings: avoid invalid "\[" escape warnings)
                    uplink_pnic = re.match(r".*\[(.+?)\].*",
                                           connection["dn"]).group(1)
                    downlink_pnic = re.match(r".*\[(.+?)\].*",
                                             connection["portDesc"]) \
                        .group(1)
                    spines.append({"device": spine,
                                   "downlink_pnic": downlink_pnic,
                                   "uplink_pnic": uplink_pnic})
                except AttributeError:
                    continue  # TODO: probably raise an exception
        return spines

    @aci_config_required(default=[])
    def get(self, db_leaf_pnic_id):
        """Build switch_pnic documents linking a leaf pnic to spine pnics.

        :param db_leaf_pnic_id: inventory id of the leaf switch pnic
        :return: list of downlink/uplink switch_pnic documents
        :raises ValueError: when the leaf or spine switch id cannot be
            parsed from the ACI identifiers
        """
        environment = self.get_env()
        leaf_pnic = self.inv.get_by_id(environment=environment,
                                       item_id=db_leaf_pnic_id)
        leaf_switch_id = leaf_pnic['switch']

        # Decode aci leaf switch id from db format
        aci_leaf_pnic_id = decode_aci_dn(db_leaf_pnic_id)
        # BUGFIX: a failed match raised a bare AttributeError; raise a
        # descriptive ValueError instead, consistent with the spine dn
        # check below
        leaf_id_match = re.match(r"switch-(.+?)-leaf", aci_leaf_pnic_id)
        if not leaf_id_match:
            raise ValueError("Failed to fetch leaf switch id "
                             "from pnic id: {}".format(db_leaf_pnic_id))
        aci_leaf_id = leaf_id_match.group(1)

        # Fetch all leaf-to-spine connectivity data
        spines_with_pnics = \
            self.fetch_spines_and_pnics_by_leaf_id(aci_leaf_id)

        pnics = []
        for spine_with_pnic in spines_with_pnics:
            spine = spine_with_pnic["device"]
            downlink_pnic_id = spine_with_pnic["downlink_pnic"]
            uplink_pnic_id = spine_with_pnic["uplink_pnic"]

            # Add spine switch to db if it's not there yet
            spine_id_match = re.match(r"topology/(.+)", spine["dn"])
            if not spine_id_match:
                raise ValueError("Failed to fetch spine switch id "
                                 "from switch dn: {}".format(spine["dn"]))
            aci_spine_id = spine_id_match.group(1)
            db_spine_id = "-".join(("switch",
                                    encode_aci_dn(aci_spine_id),
                                    spine["role"]))
            if not self.inv.get_by_id(environment, db_spine_id):
                spine_json = {
                    "id": db_spine_id,
                    "type": "switch",
                    "switch": db_spine_id,
                    "aci_document": spine
                }
                # Region name is the same as region id
                region_id = get_object_path_part(leaf_pnic["name_path"],
                                                 "Regions")
                region = self.inv.get_by_id(environment, region_id)
                self.inv.save_inventory_object(o=spine_json,
                                               parent=region,
                                               environment=environment)

            # Add downlink and uplink pnics to results list,
            # including their mutual connection data
            # (see "connected_to" field).
            db_downlink_pnic_id = \
                "-".join((db_spine_id, encode_aci_dn(downlink_pnic_id)))
            db_uplink_pnic_id = \
                "-".join((leaf_pnic["switch"],
                          encode_aci_dn(uplink_pnic_id)))

            downlink_pnic_json = {
                "id": db_downlink_pnic_id,
                "object_name": downlink_pnic_id,
                "type": "switch_pnic",
                "role": "downlink",
                "connected_to": db_uplink_pnic_id,
                "switch": db_spine_id,
                "parent_id": db_spine_id,
                "parent_type": "switch",
                "aci_document": {}  # TODO: what can we add here?
            }
            uplink_pnic_json = {
                "id": db_uplink_pnic_id,
                "object_name": uplink_pnic_id,
                "type": "switch_pnic",
                "role": "uplink",
                "connected_to": db_downlink_pnic_id,
                "switch": leaf_switch_id,
                "parent_id": leaf_switch_id,
                "parent_type": "switch",
                "aci_document": {}  # TODO: what can we add here?
            }
            pnics.extend([downlink_pnic_json, uplink_pnic_json])
        return pnics