def _parse(self, data):
    templates_path = "{}/ttp_templates/L2_Drawer/{}.txt"
    # process data dictionary
    if isinstance(data, dict):
        parser = ttp(vars=self.ttp_vars)
        for platform_name, text_list in data.items():
            ttp_template = N2G_utils.open_ttp_template(
                self.config, platform_name, templates_path
            )
            if not ttp_template:
                continue
            parser.add_template(template=ttp_template, template_name=platform_name)
            for item in text_list:
                parser.add_input(item, template_name=platform_name)
    # process directories at OS path
    elif isinstance(data, str):
        parser = ttp(vars=self.ttp_vars, base_path=data)
        # get all sub-folders and load respective templates
        with os.scandir(data) as dirs:
            for entry in dirs:
                if entry.is_dir():
                    ttp_template = N2G_utils.open_ttp_template(
                        self.config, entry.name, templates_path
                    )
                    if not ttp_template:
                        continue
                    parser.add_template(
                        template=ttp_template, template_name=entry.name
                    )
    else:
        log.error(
            "Expecting dictionary or string, but '{}' given".format(type(data))
        )
        return
    parser.parse(one=True)
    self.parsed_data = parser.result(structure="dictionary")
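
# Illustrative sketch (not part of the module) of the two ``data`` argument
# forms accepted by ``_parse`` above; platform names, text content and the
# path below are made-up examples:
#
#   data = {
#       "cisco_ios": ["switch-1 show commands output", "switch-2 show commands output"],
#       "cisco_nxos": ["switch-3 show commands output"],
#   }
#
# or a path to a directory with one sub-folder per platform, each sub-folder
# holding text files with show commands output:
#
#   data = "/path/to/Data/"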

def _add_link(self, item, hosts, host_data):
    # skip LAG or MLAG interfaces
    if self.config["skip_lag"] and (
        "LAG" in item.get("src_label", "") or "LAG" in item.get("trgt_label", "")
    ):
        return
    link_hash = N2G_utils.make_hash_tuple(item)
    if link_hash not in self.links_dict:
        self.links_dict[link_hash] = {
            "source": item["source"],
            "target": item["target"]["id"],
            "src_label": item["src_label"],
            "trgt_label": item["trgt_label"],
        }
    # check if need to add interfaces data
    if self.config.get("add_interfaces_data"):
        self._add_interfaces_data(item, hosts, host_data, link_hash)
    # check if need to pre-process lag_links_dict used by add_lag
    if self.config.get("add_lag"):
        self._update_lag_links_dict(item, hosts, host_data)
    # check if need to pre-process nodes_to_links_dict used by group_links
    if self.config.get("group_links"):
        self._update_nodes_to_links_dict(item, link_hash)
    # check if need to combine peers, pre-process combine_peers_dict
    if self.config.get("combine_peers"):
        self._update_combine_peers_dict(item, link_hash)
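
# Hypothetical example of the ``item`` argument consumed by the ``_add_link``
# method above and the entry it produces in ``self.links_dict`` (device and
# interface names are made up):
#
#   item = {
#       "source": "switch-1",
#       "target": {"id": "switch-2"},
#       "src_label": "Eth1/1",
#       "trgt_label": "Eth1/2",
#   }
#
#   # stored keyed by N2G_utils.make_hash_tuple(item):
#   {"source": "switch-1", "target": "switch-2",
#    "src_label": "Eth1/1", "trgt_label": "Eth1/2"}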

def _add_link(self, item, network=None):
    link_hash = N2G_utils.make_hash_tuple(item)
    if link_hash not in self.links_dict:
        self.links_dict[link_hash] = item
    # check if need to pre-process nodes_to_links_dict used by group_links
    if self.config.get("group_links"):
        self._update_nodes_to_links_dict(item, link_hash)
    # collect links on point-to-point subnets - IPv4 /30 and /31, IPv6 /127
    if self.config.get("collapse_ptp") and network:
        if (network.split("/")[1] in ["30", "31"] and "." in network) or (
            network.split("/")[1] in ["127"] and ":" in network
        ):
            self.collapse_ptp_dict.setdefault(network, []).append(link_hash)
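
# Sketch of how ``collapse_ptp_dict`` accumulates link hashes for point-to-point
# subnets in the method above (prefix and hash values are illustrative only):
#
#   self.collapse_ptp_dict = {
#       "10.0.0.0/31": [
#           # hash of the subnet-to-switch-1 link,
#           # hash of the subnet-to-switch-2 link,
#       ],
#   }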

def _group_links(self):
    """
    Method to group links between nodes and update links_dict
    """
    # find node pairs that have more than 1 link in between, group their links
    for node_pair, link_hashes in self.nodes_to_links_dict.items():
        if len(link_hashes) < 2:
            continue
        grouped_link = {"source": node_pair[0], "target": node_pair[1]}
        description = {"grouped_links": {}}
        links_to_group_count = 0
        for link_hash in link_hashes:
            if link_hash in self.links_dict:
                links_to_group_count += 1
                link_data = self.links_dict[link_hash]
                src_label = "{}:{}".format(
                    link_data["source"], link_data.get("src_label", "")
                )
                trgt_label = "{}:{}".format(
                    link_data["target"], link_data.get("trgt_label", "")
                )
                description["grouped_links"][src_label] = trgt_label
                description["link-{}".format(links_to_group_count)] = json.loads(
                    link_data["description"]
                )
        # skip grouping links that do not have any members left in self.links_dict -
        # happens when add_lag pops links or only one link between nodes is left
        if links_to_group_count >= 2:
            # remove grouped links from links_dict
            for link_hash in link_hashes:
                _ = self.links_dict.pop(link_hash, None)
            # form grouped link description and label
            grouped_link["description"] = json.dumps(
                description, sort_keys=True, indent=4, separators=(",", ": ")
            )
            grouped_link["label"] = "x{}".format(links_to_group_count)
            grouped_link_hash = N2G_utils.make_hash_tuple(grouped_link)
            self.links_dict[grouped_link_hash] = grouped_link
    del self.nodes_to_links_dict
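
# Illustrative result of grouping (hostnames and ports are made up): two
# parallel links between switch-1 and switch-2 are replaced by a single link
# labelled "x2" whose description keeps the original endpoint details:
#
#   {
#       "source": "switch-1",
#       "target": "switch-2",
#       "label": "x2",
#       "description": '{"grouped_links": {"switch-1:Eth1": "switch-2:Eth1", ...},'
#                      ' "link-1": {...}, "link-2": {...}}',
#   }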

def _form_base_graph_dict(self):
    interfaces_ip = {}  # need this dict to skip ARP entries
    for platform, hosts_data in self.parsed_data.items():
        for hostname, host_data in hosts_data.items():
            self._add_node({"id": hostname, "top_label": "Device"}, host_data)
            for interface, interface_data in host_data["interfaces"].items():
                interface_networks = []
                for ip in interface_data.get("ip_addresses", []):
                    network = ip["network"]
                    if self.config.get("add_arp") or self.config.get("add_fhrp"):
                        interface_networks.append(network)
                        interfaces_ip.setdefault(network, []).append(ip["ip"])
                    network_node = {"id": network, "top_label": "Subnet"}
                    # add bottom label to node
                    if interface_data.get("port_description") and self.config.get("blbl"):
                        if len(interface_data["port_description"]) > self.config["blbl"]:
                            network_node["bottom_label"] = "{}..".format(
                                interface_data["port_description"][: self.config["blbl"]]
                            )
                        else:
                            network_node["bottom_label"] = "{}".format(
                                interface_data["port_description"]
                            )
                    link_description_data = {
                        k: v
                        for k, v in interface_data.items()
                        if k not in ["arp", "ip_addresses"]
                    }
                    link_description_data.update(ip)
                    link_description = {
                        "{}:{}".format(hostname, interface): link_description_data
                    }
                    link_dict = {
                        "source": network,
                        "target": hostname,
                        "description": json.dumps(
                            link_description,
                            sort_keys=True,
                            indent=4,
                            separators=(",", ": "),
                        ),
                    }
                    if ip["netmask"] in ["32", "128"]:
                        network_node["top_label"] = interface
                        network_node["bottom_label"] = interface_data.get("vrf", "global")
                    else:
                        link_label = "{}/{}".format(ip["ip"], ip["netmask"])
                        if self.config["label_vrf"]:
                            link_label = "{}:{}".format(
                                interface_data.get("vrf", "global"), link_label
                            )
                        if self.config["label_interface"]:
                            link_label = "{}:{}".format(interface, link_label)
                        if self.config.get("lbl_next_to_subnet"):
                            link_dict["src_label"] = link_label
                        else:
                            link_dict["trgt_label"] = link_label
                    # add new node and link to graph
                    self._add_node(network_node)
                    self._add_link(link_dict, network)
                # check if need to add FHRP IPs
                if self.config.get("add_fhrp"):
                    interface_network_objects = [
                        ipaddress.ip_network(i) for i in interface_networks
                    ]
                    for fhrp_entry in interface_data.get("fhrp", []):
                        # get fhrp entry network
                        ip = fhrp_entry["ip"]
                        ip_obj = ipaddress.ip_address(ip)
                        network = str(
                            [i for i in interface_network_objects if ip_obj in i][0]
                        )
                        # add IP address node
                        node_id = "{}:{}".format(network, ip)
                        description = {
                            "FHRP:{}:{}".format(hostname, interface): fhrp_entry
                        }
                        self._add_node(
                            {
                                "id": node_id,
                                "top_label": "{} VIP".format(fhrp_entry["type"]),
                                "label": ip,
                                "description": json.dumps(
                                    description,
                                    sort_keys=True,
                                    indent=4,
                                    separators=(",", ": "),
                                ),
                            }
                        )
                        # add link to network
                        link_dict = {"source": node_id, "target": network}
                        self._add_link(link_dict)
                # check if need to add ARP to diagram
                if self.config.get("add_arp"):
                    interface_network_objects = [
                        ipaddress.ip_network(i) for i in interface_networks
                    ]
                    for arp_entry in interface_data.get("arp", []):
                        # get arp entry network
                        ip = arp_entry["ip"]
                        ip_obj = ipaddress.ip_address(ip)
                        network = str(
                            [i for i in interface_network_objects if ip_obj in i][0]
                        )
                        # add IP address node
                        node_id = "{}:{}".format(network, ip)
                        description = {
                            "ARP:{}:{}".format(hostname, interface): arp_entry
                        }
                        self._add_node(
                            {
                                "id": node_id,
                                "top_label": "ARP entry",
                                "label": ip,
                                "description": json.dumps(
                                    description,
                                    sort_keys=True,
                                    indent=4,
                                    separators=(",", ": "),
                                ),
                            }
                        )
                        # add link to network
                        link_dict = {"source": node_id, "target": network}
                        self._add_link(link_dict)
    # clean up ARP entries that duplicate interface IPs
self.config.get("add_arp"): for network, ips in interfaces_ip.items(): for ip in ips: arp_node_id = "{}:{}".format(network, ip) link_hash = N2G_utils.make_hash_tuple({ "source": arp_node_id, "target": network }) _ = self.nodes_dict.pop(arp_node_id, None) _ = self.links_dict.pop(link_hash, None)

def _combine_peers(self):
    """
    self.combine_peers_dict is a dictionary of
    {("hostname", "interface"): [links_hashes]} structure; if the length of
    [links_hashes] is more than 1, we have several LLDP/CDP peers behind that
    port - usually happens with VMs sitting on a host or some form of VPLS
    transport, i.e. an L2 domain between this port and the CDP/LLDP peers.

    This method adds a new node to the diagram with an "L2" label and a
    "hostname:interface" based id, connecting all CDP/LLDP peers to it. That
    is done to reduce clutter and improve readability.
    """
    for port_id, links_hashes in self.combine_peers_dict.items():
        # port_id - tuple of ("hostname", "interface")
        if len(links_hashes) < 2:
            continue
        # add L2 node
        l2_node_id = "{}:{}:L2_Node".format(*port_id)
        l2_node_port_id = "{}:{}".format(*port_id)
        self._add_node(
            item={
                "id": l2_node_id,
                "label": "L2",
                "shape_type": "ellipse",
                "height": 40,
                "width": 40,
            },
            host_data={},
        )
        # add link to L2 node
        link_to_l2_node = {
            "source": port_id[0],
            "target": l2_node_id,
            "src_label": port_id[1],
            "description": {l2_node_port_id: {}},
        }
        # get interface data from parsing results to add it to description
        for platform, hosts in self.parsed_data.items():
            try:
                if port_id[1].startswith("MLAG"):
                    link_to_l2_node["description"] = {
                        l2_node_port_id: hosts[port_id[0]]["interfaces"][
                            port_id[1].replace("MLAG", "LAG")
                        ]
                    }
                else:
                    link_to_l2_node["description"] = {
                        l2_node_port_id: hosts[port_id[0]]["interfaces"][port_id[1]]
                    }
                break
            except:
                continue
        link_to_l2_node["description"] = json.dumps(
            link_to_l2_node["description"],
            sort_keys=True,
            indent=4,
            separators=(",", ": "),
        )
        link_to_l2_node_hash = N2G_utils.make_hash_tuple(link_to_l2_node)
        self.links_dict[link_to_l2_node_hash] = link_to_l2_node
        # connect CDP/LLDP peers to L2 node
        for link_hash in links_hashes:
            # link might be deleted from links_dict by add_lag
            if link_hash in self.links_dict:
                old_link = self.links_dict.pop(link_hash)
                _ = old_link.pop("src_label", None)
                # remove upstream peer interface details from description
                old_link_data = json.loads(old_link.get("description", "{}"))
                _ = old_link_data.pop(l2_node_port_id, None)
                old_link["description"] = json.dumps(
                    old_link_data,
                    sort_keys=True,
                    indent=4,
                    separators=(",", ": "),
                )
                # form new link
                old_link["source"] = l2_node_id
                new_link_hash = N2G_utils.make_hash_tuple(old_link)
                self.links_dict[new_link_hash] = old_link
            # need to remove this L2 node and links to it as it turned out
            # the links are part of a LAG; as a result a node will be added
            # for the LAG together with links to peers
            elif self.config.get("add_lag"):
                _ = self.links_dict.pop(link_to_l2_node_hash)
                _ = self.nodes_dict.pop(l2_node_id)
                break
    del self.combine_peers_dict
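
# Sketch of the pre-processed ``combine_peers_dict`` consumed above (hostname,
# port and hash values are illustrative): three peers seen behind switch-1
# Eth49 produce one "L2" ellipse node with id "switch-1:Eth49:L2_Node", and the
# peer links are re-attached to that node:
#
#   self.combine_peers_dict = {
#       ("switch-1", "Eth49"): [
#           # hash of link to peer-1,
#           # hash of link to peer-2,
#           # hash of link to peer-3,
#       ],
#   }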

def _add_all_connected(self):
    """
    Method to iterate over all interfaces and find the ones that are in up
    state but have no CDP/LLDP peers, adding nodes connected to such
    interfaces to the graph
    """
    for platform, hosts in self.parsed_data.items():
        for hostname, host_data in hosts.items():
            for intf_name, intf_data in host_data["interfaces"].items():
                # skip links that are not up
                line = intf_data.get("state", {}).get("line", "").lower()
                if "up" not in line:
                    continue
                # skip non-physical ports
                if not intf_data["state"].get("is_physical_port"):
                    continue
                # check if interface has CDP or LLDP peers
                has_cdp_or_lldp_peer = False
                for item in host_data.get("cdp_peers", []):
                    if item["src_label"] == intf_name:
                        has_cdp_or_lldp_peer = True
                if has_cdp_or_lldp_peer:
                    continue
                for item in host_data.get("lldp_peers", []):
                    if item["src_label"] == intf_name:
                        has_cdp_or_lldp_peer = True
                if has_cdp_or_lldp_peer:
                    continue
                # create new node and add it to graph
                node_id = "{}:{}".format(hostname, intf_name)
                node = {"id": node_id, "label": "Unknown"}
                if intf_data.get("description"):
                    node["bottom_label"] = "{}..".format(intf_data["description"][:20])
                self._add_node(node, host_data={})
                # add link to graph
                link = {"source": hostname}
                if "lag_id" in intf_data and self.config["add_lag"]:
                    lag_intf_name = "LAG{}".format(intf_data["lag_id"])
                    src_if = "{}:{}".format(hostname, lag_intf_name)
                    lag_intf_data = host_data["interfaces"].get(lag_intf_name, {})
                    if "mlag_id" in lag_intf_data:
                        lag_intf_name = "MLAG{}".format(lag_intf_data["mlag_id"])
                    link["src_label"] = lag_intf_name
                    link["description"] = json.dumps(
                        {
                            src_if: lag_intf_data,
                            "lag_members": {"{}:{}".format(hostname, intf_name): ""},
                        },
                        sort_keys=True,
                        indent=4,
                        separators=(",", ": "),
                    )
                    # update node bottom label as per lag interface description
                    node["bottom_label"] = (
                        "{}..".format(lag_intf_data.get("description", "")[:20])
                        if lag_intf_data.get("description")
                        else node.get("bottom_label", "")
                    )
                    # remove previous node that had ID based on lag member interface
                    node = self.nodes_dict.pop(node_id, {})
                    new_node_id = "{}:{}".format(hostname, lag_intf_name)
                    node["id"] = new_node_id
                    link["target"] = new_node_id
                    self._add_node(node, host_data={})
                else:
                    src_if = "{}:{}".format(hostname, intf_name)
                    link["target"] = node_id
                    link["src_label"] = intf_name
                    link["description"] = json.dumps(
                        {src_if: intf_data},
                        sort_keys=True,
                        indent=4,
                        separators=(",", ": "),
                    )
                link_hash = N2G_utils.make_hash_tuple(link)
                if link_hash not in self.links_dict:
                    self.links_dict[link_hash] = link
                else:
                    link_data = json.loads(
                        self.links_dict[link_hash]["description"]
                    )
                    if "lag_members" in link_data:
                        link_data["lag_members"].update(
                            {"{}:{}".format(hostname, intf_name): ""}
                        )
                        self.links_dict[link_hash]["description"] = json.dumps(
                            link_data,
                            sort_keys=True,
                            indent=4,
                            separators=(",", ": "),
                        )
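
# Illustrative node and link added by ``_add_all_connected`` for an up port
# with no CDP/LLDP peers (names are made up): node id "switch-1:Eth5" with
# label "Unknown", linked to "switch-1" with src_label "Eth5" - or the LAG/
# MLAG name instead, when add_lag is enabled and the port is a LAG member.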

def _update_lag_links_dict(self, item, hosts, host_data):
    """
    Method to form and add LAG link to lag_links_dict
    """
    lag_link = {}
    src = item["source"]
    tgt = item["target"]["id"]
    src_intf_name = item["src_label"]
    src_intf_data = host_data.get("interfaces", {}).get(src_intf_name, {})
    tgt_intf_name = item["trgt_label"]
    tgt_intf_data = hosts.get(tgt, {}).get("interfaces", {}).get(tgt_intf_name, {})
    if "lag_id" in src_intf_data:
        src_lag_name = "LAG{}".format(src_intf_data["lag_id"])
        src_lag_data = host_data.get("interfaces", {}).get(src_lag_name, {})
        if "mlag_id" in src_lag_data:
            src_lag_name = "MLAG{}".format(src_lag_data["mlag_id"])
        lag_link.update(
            {
                "source": src,
                "target": tgt,
                "src_label": src_lag_name,
                "description": {"{}:{}".format(src, src_lag_name): src_lag_data},
            }
        )
    if "lag_id" in tgt_intf_data:
        tgt_lag_name = "LAG{}".format(tgt_intf_data["lag_id"])
        tgt_lag_data = hosts.get(tgt, {}).get("interfaces", {}).get(tgt_lag_name, {})
        if "mlag_id" in tgt_lag_data:
            tgt_lag_name = "MLAG{}".format(tgt_lag_data["mlag_id"])
        lag_link.update({"source": src, "target": tgt, "trgt_label": tgt_lag_name})
        lag_link.setdefault("description", {})
        lag_link["description"].update(
            {"{}:{}".format(tgt, tgt_lag_name): tgt_lag_data}
        )
    # add lag link to links dictionary and remove members from links_dict
    if lag_link:
        lag_link_hash = N2G_utils.make_hash_tuple(lag_link)
        src_member_intf_name = "{}:{}".format(src, src_intf_name)
        tgt_member_intf_name = "{}:{}".format(tgt, tgt_intf_name)
        member_link = {src_member_intf_name: tgt_member_intf_name}
        if lag_link_hash not in self.lag_links_dict:
            lag_link["description"]["lag_members"] = member_link
            self.lag_links_dict[lag_link_hash] = lag_link
        else:
            added_lag_members = self.lag_links_dict[lag_link_hash]["description"][
                "lag_members"
            ]
            # only update members if opposite end not added already
            if tgt_member_intf_name not in added_lag_members:
                self.lag_links_dict[lag_link_hash]["description"][
                    "lag_members"
                ].update(member_link)
        # remove member interfaces from links dictionary
        members_hash = N2G_utils.make_hash_tuple(item)
        _ = self.links_dict.pop(members_hash, None)
        # check if need to combine peers, add lag to combine_peers_dict
        if self.config.get("combine_peers"):
            self._update_combine_peers_dict(item=lag_link, link_hash=lag_link_hash)
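
# Hypothetical ``lag_links_dict`` entry formed by the method above when both
# ends report lag_id 10 and the target side also carries mlag_id 110 (all
# names and values are made up):
#
#   {
#       "source": "switch-1",
#       "target": "switch-2",
#       "src_label": "LAG10",
#       "trgt_label": "MLAG110",
#       "description": {
#           "switch-1:LAG10": {},    # LAG interface data from parsing results
#           "switch-2:MLAG110": {},
#           "lag_members": {"switch-1:Eth1": "switch-2:Eth1"},
#       },
#   }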