def print_config(self, lm_config: lm_types.LinkMonitorConfig):
    """
    Render stored Link Monitor parameters: an overview table followed by
    the per-interface link-metric overrides and per-adjacency overrides.
    """
    overview = [
        ["isOverloaded: {}".format("Yes" if lm_config.isOverloaded else "No")],
        ["nodeLabel: {}".format(lm_config.nodeLabel)],
        ["overloadedLinks: {}".format(", ".join(lm_config.overloadedLinks))],
    ]
    print(printing.render_vertical_table(
        overview, caption="Link Monitor parameters stored"))

    # Per-interface metric overrides, sorted by interface name
    print(printing.render_vertical_table([["linkMetricOverrides:"]]))
    link_rows = [
        [iface, metric]
        for iface, metric in sorted(lm_config.linkMetricOverrides.items())
    ]
    print(printing.render_horizontal_table(
        link_rows, column_labels=["Interface", "Metric Override"]))

    # Per-adjacency metric overrides; key carries (nodeName, ifName)
    print(printing.render_vertical_table([["adjMetricOverrides:"]]))
    adj_rows = [
        [adj_key.nodeName + " " + adj_key.ifName, metric]
        for adj_key, metric in sorted(lm_config.adjMetricOverrides.items())
    ]
    print(printing.render_horizontal_table(
        adj_rows, column_labels=["Adjacency", "Metric Override"]))
def print_stats(self, stats_templates, counters):
    """ Print in pretty format """

    suffixes = ["60", "600", "3600", "0"]

    def _counter_value(key):
        # Missing (or falsy) counter entries render as "N/A"
        entry = counters.get(key, None)
        return "N/A" if not entry else entry.value

    for template in stats_templates:
        counters_rows = [
            [title, _counter_value(key)] for title, key in template["counters"]
        ]

        stats_cols = ["Stat", "1 min", "10 mins", "1 hour", "All Time"]
        stats_rows = [
            [title] + [_counter_value("{}.{}".format(key_prefix, s)) for s in suffixes]
            for title, key_prefix in template["stats"]
        ]

        print("> {} ".format(template["title"]))

        if counters_rows:
            print()
            print(printing.render_horizontal_table(
                counters_rows, tablefmt="plain").strip("\n"))
        if stats_rows:
            print()
            print(printing.render_horizontal_table(
                stats_rows, column_labels=stats_cols,
                tablefmt="simple").strip("\n"))
def print_config(self, lm_config):
    '''
    Render stored Link Monitor parameters: overview, then link-metric
    overrides per interface, then adjacency-metric overrides.
    '''
    summary_rows = [
        ['isOverloaded: {}'.format('Yes' if lm_config.isOverloaded else 'No')],
        ['nodeLabel: {}'.format(lm_config.nodeLabel)],
        ['overloadedLinks: {}'.format(', '.join(lm_config.overloadedLinks))],
    ]
    print(printing.render_vertical_table(
        summary_rows, caption='Link Monitor parameters stored'))

    # Interface-level metric overrides
    print(printing.render_vertical_table([['linkMetricOverrides:']]))
    iface_rows = [
        [iface, metric]
        for iface, metric in sorted(lm_config.linkMetricOverrides.items())
    ]
    print(printing.render_horizontal_table(
        iface_rows, column_labels=['Interface', 'Metric Override']))

    # Adjacency-level metric overrides; key carries (nodeName, ifName)
    print(printing.render_vertical_table([['adjMetricOverrides:']]))
    adj_rows = [
        [adj_key.nodeName + ' ' + adj_key.ifName, metric]
        for adj_key, metric in sorted(lm_config.adjMetricOverrides.items())
    ]
    print(printing.render_horizontal_table(
        adj_rows, column_labels=['Adjacency', 'Metric Override']))
def print_links_table(interfaces, caption=None):
    """
    @param interfaces: dict<interface-name, InterfaceDetail>
    @param caption: Caption to show on table name
    """
    columns = ["Interface", "Status", "Metric Override", "Addresses"]
    rows = []
    for name, detail in sorted(interfaces.items()):
        override = detail.metricOverride if detail.metricOverride else ""

        # Status column: Down (red), Hold (yellow, with remaining back-off), or Up
        if not detail.info.isUp:
            status = click.style('Down', fg='red')
        else:
            backoff_sec = detail.linkFlapBackOffMs / 1000
            if backoff_sec == 0:
                status = 'Up'
            else:
                status = click.style(
                    'Hold ({} s)'.format(backoff_sec), fg='yellow')

        # Overload state replaces any metric override in the table
        if detail.isOverloaded:
            override = click.style("Overloaded", fg="red")
        rows.append([name, status, override, ""])

        # First address goes on the interface row; extras get blank rows
        addr_strs = [
            ipnetwork.sprint_addr(prefix.prefixAddress.addr)
            for prefix in detail.info.networks
        ]
        if addr_strs:
            rows[-1][3] = addr_strs[0]
            rows.extend([["", "", "", a] for a in addr_strs[1:]])

    print(printing.render_horizontal_table(rows, columns, caption))
    print()
def print_paths(self, paths):
    """
    Pretty-print computed paths; a path whose first element is truthy
    gets a " *" suffix in its caption.
    """
    if not paths:
        print("No paths are found!")
        return

    paths = self.calculate_hop_metric(paths)
    column_labels = ["Hop", "NextHop Node", "Interface", "Metric", "NextHop-v6"]
    plural = "path is" if len(paths) == 1 else "paths are"
    print("{} {} found.".format(len(paths), plural))

    for idx, path in enumerate(paths):
        caption = "Path {}{}".format(idx + 1, " *" if path[0] else "")
        table = printing.render_horizontal_table(
            path[1], column_labels, caption=caption, tablefmt="plain")
        print(table)
        print()
def sprint_adj_db_full(global_adj_db, adj_db, bidir):
    """ given serialized adjacency database, print neighbors. Use the
        global adj database to validate bi-dir adjacencies

        :param global_adj_db map(str, AdjacencyDatabase):
            map of node names to their adjacent node names
        :param adj_db lsdb_types.AdjacencyDatabase: latest from kv store
        :param bidir bool: only print bidir adjacencies

        :return [str]: list of string to be printed
    """
    assert isinstance(adj_db, lsdb_types.AdjacencyDatabase)
    this_node_name = adj_db.thisNodeName
    node_label_str = "Node Label: {}".format(adj_db.nodeLabel)

    column_labels = [
        "Neighbor",
        "Local Intf",
        "Remote Intf",
        "Metric",
        "Label",
        "NextHop-v4",
        "NextHop-v6",
        "Uptime",
    ]

    def _is_bidir(adj):
        # Adjacency counts as bi-directional only if the other node's DB
        # also lists this node as a neighbor
        other_db = global_adj_db.get(adj.otherNodeName, None)
        if other_db is None:
            return False
        return this_node_name in {a.otherNodeName for a in other_db.adjacencies}

    rows = []
    for adj in adj_db.adjacencies:
        if bidir and not _is_bidir(adj):
            continue

        # Overloaded adjacencies display a red marker instead of the metric
        metric = (
            click.style("Overloaded", fg="red")
            if adj.isOverloaded else adj.metric
        )
        rows.append([
            adj.otherNodeName,
            adj.ifName,
            adj.otherIfName,
            metric,
            adj.adjLabel,
            ipnetwork.sprint_addr(adj.nextHopV4.addr),
            ipnetwork.sprint_addr(adj.nextHopV6.addr),
            time_since(adj.timestamp) if adj.timestamp else "",
        ])

    return node_label_str, printing.render_horizontal_table(
        rows, column_labels)
def print_links_table(interfaces, caption=None):
    '''
    @param interfaces: dict<interface-name, InterfaceDetail>
    @param caption: Caption to show on table name
    '''
    columns = ['Interface', 'Status', 'Metric Override', 'Addresses']
    rows = []
    for name, detail in sorted(interfaces.items()):
        status = 'Up' if detail.info.isUp else click.style('Down', fg='red')

        # Overload state takes precedence over any metric override
        if detail.isOverloaded:
            override = click.style('Overloaded', fg='red')
        else:
            override = detail.metricOverride if detail.metricOverride else ''
        rows.append([name, status, override, ''])

        # First address on the interface row; additional ones on blank rows
        addrs = [
            ipnetwork.sprint_addr(prefix.prefixAddress.addr)
            for prefix in detail.info.networks
        ]
        if addrs:
            rows[-1][3] = addrs[0]
            rows.extend([['', '', '', a] for a in addrs[1:]])

    print(printing.render_horizontal_table(rows, columns, caption))
    print()
def print_kvstore_nodes(self, resp, host_id):
    '''
    print prefixes from raw publication from KvStore

    :param resp kv_store_types.Publication: pub from kv store
    '''

    def _parse_nodes(rows, value):
        prefix_db = serializer.deserialize_thrift_object(
            value.value, lsdb_types.PrefixDatabase)
        # '*' marks the node we are connected to, '>' all others
        marker = '* ' if prefix_db.thisNodeName == host_id else '> '
        # Longest addresses first so the v6 loopback precedes any v4 one
        loopbacks = sorted(
            (p.prefix for p in prefix_db.prefixEntries
             if p.type == lsdb_types.PrefixType.LOOPBACK),
            key=lambda x: len(x.prefixAddress.addr),
            reverse=True,
        )
        rows.append(
            ["{}{}".format(marker, prefix_db.thisNodeName)]
            + [utils.sprint_prefix(p) for p in loopbacks]
        )

    rows = []
    self.iter_publication(rows, resp, set(['all']), _parse_nodes)

    label = ['Node', 'V6-Loopback']
    # if any node has a v4 loopback addr, we should have the v4-addr column
    if any(len(row) > 2 for row in rows):
        label.append('V4-Loopback')

    print(printing.render_horizontal_table(rows, label))
def print_kvstore_nodes(self, resp, host_id):
    '''
    print prefixes from raw publication from KvStore

    :param resp kv_store_types.Publication: pub from kv store
    '''

    def _parse_nodes(rows, value):
        prefix_db = deserialize_thrift_object(value.value,
                                              lsdb_types.PrefixDatabase)
        # '*' marks the node we are connected to, '>' all others
        marker = '* ' if prefix_db.thisNodeName == host_id else '> '
        rows.append(
            ["{}{}".format(marker, prefix_db.thisNodeName)]
            + utils.sprint_prefixes_db_full(prefix_db, True)
        )

    rows = []
    self.iter_publication(rows, resp, set(['all']), _parse_nodes)

    label = ['Node', 'V6-Loopback']
    # if any node has a v4 loopback addr, we should have the v4-addr column
    if any(len(row) > 2 for row in rows):
        label.append('V4-Loopback')

    print(printing.render_horizontal_table(rows, label))
def print_links_table(interfaces, caption=None):
    '''
    @param interfaces: dict<interface-name, InterfaceDetail>
    @param caption: Caption to show on table name
    '''
    columns = [
        'Interface', 'Status', 'Overloaded', 'Metric Override', 'ifIndex',
        'Addresses'
    ]
    rows = []
    for name, detail in sorted(interfaces.items()):
        rows.append([
            name,
            'Up' if detail.info.isUp else 'Down',
            'True' if detail.isOverloaded else '',
            detail.metricOverride if detail.metricOverride else '',
            detail.info.ifIndex,
            '',
        ])
        # First address fills the interface row; extras get blank rows
        addrs = [
            utils.sprint_addr(a.addr)
            for a in detail.info.v4Addrs + detail.info.v6LinkLocalAddrs
        ]
        if addrs:
            rows[-1][5] = addrs[0]
            rows.extend([['', '', '', '', '', a] for a in addrs[1:]])

    print(printing.render_horizontal_table(rows, columns, caption))
    print()
def print_kvstore_keys(self, resp, ttl):
    '''
    print keys from raw publication from KvStore

    :param resp kv_store_types.Publication: pub from kv store
    :param ttl bool: Show ttl value and version if True
    '''
    rows = []
    for key, value in sorted(resp.keyVals.items(), key=lambda x: x[0]):
        row = [key, value.originatorId, value.version, value.hash]
        if ttl:
            # Infinite TTL is displayed as "Inf"; otherwise HH:MM:SS
            ttlStr = ("Inf" if value.ttl == Consts.CONST_TTL_INF
                      else str(datetime.timedelta(milliseconds=value.ttl)))
            row.extend([ttlStr, value.ttlVersion])
        rows.append(row)

    caption = "Available keys in KvStore"
    column_labels = ["Key", "OriginatorId", "Version", "Hash"]
    if ttl:
        column_labels = column_labels + ["TTL (HH:MM:SS)", "TTL Version"]

    print(printing.render_horizontal_table(rows, column_labels, caption))
def sprint_prefixes_db_full(prefix_db, loopback_only=False):
    """ given serialized prefixes output an array of lines
        representing those prefixes. IPV6 prefixes come before IPV4 prefixes.

        :prefix_db lsdb_types.PrefixDatabase: prefix database
        :loopback_only : is only loopback address expected

        :return [str]: the array of prefix strings
    """
    # Two stable sorts: primary key is the raw address bytes, secondary
    # (applied first) is the prefix length
    ordered = sorted(
        sorted(prefix_db.prefixEntries, key=lambda x: x.prefix.prefixLength),
        key=lambda x: x.prefix.prefixAddress.addr,
    )
    rows = [
        [
            ipnetwork.sprint_prefix(entry.prefix),
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix_forwarding_type(entry.forwardingType),
        ]
        for entry in ordered
        if not loopback_only or entry.type is network_types.PrefixType.LOOPBACK
    ]
    return printing.render_horizontal_table(
        rows, ["Prefix", "Client Type", "Forwarding Type"])
def compare_route_db(routes_a, routes_b, sources, enable_color, quiet=False):
    """
    Compare two route databases and report differences.

    :return (bool, [str]): True with no messages when the tables match;
        otherwise False plus captions describing each mismatch (captions
        are only collected in quiet mode, otherwise they are printed).
    """
    extra_in_a = routes_difference(routes_a, routes_b)
    extra_in_b = routes_difference(routes_b, routes_a)
    diff_prefixes = prefixes_with_different_nexthops(routes_a, routes_b)

    # return error type
    error_msg = []

    def _banner(label, bg):
        # Colored banner when requested, plain text otherwise
        if enable_color:
            click.echo(click.style(label, bg=bg, fg='black'))
        else:
            click.echo(label)

    # if all good, then return early
    if not extra_in_a and not extra_in_b and not diff_prefixes:
        if not quiet:
            _banner('PASS', 'green')
            print('{} and {} routing table match'.format(*sources))
        return True, error_msg

    # Something failed.. report it
    if not quiet:
        _banner('FAIL', 'red')
        print('{} and {} routing table do not match'.format(*sources))

    if extra_in_a:
        caption = 'Routes in {} but not in {}'.format(*sources)
        if quiet:
            error_msg.append(caption)
        else:
            print_routes(caption, extra_in_a)

    if extra_in_b:
        caption = 'Routes in {} but not in {}'.format(*reversed(sources))
        if quiet:
            error_msg.append(caption)
        else:
            print_routes(caption, extra_in_b)

    if diff_prefixes:
        caption = 'Prefixes have different nexthops in {} and {}'.format(*sources)
        if quiet:
            error_msg.append(caption)
        else:
            rows = [
                [prefix, ', '.join(lhs_nexthops), ', '.join(rhs_nexthops)]
                for prefix, lhs_nexthops, rhs_nexthops in diff_prefixes
            ]
            print(printing.render_horizontal_table(
                rows,
                ['Prefix'] + sources,
                caption=caption,
            ))

    return False, error_msg
def print_allocations_table(alloc_str):
    """ print static allocations """

    allocations = deserialize_thrift_object(alloc_str,
                                            alloc_types.StaticAllocation)
    rows = [
        [node, ipnetwork.sprint_prefix(prefix)]
        for node, prefix in allocations.nodePrefixes.items()
    ]
    print(printing.render_horizontal_table(rows, ["Node", "Prefix"]))
def print_kvstore_keys(
    self, resp: Dict[str, kv_store_types.Publication], ttl: bool, json: bool
) -> None:
    """ print keys from raw publication from KvStore"""

    # Export in json format if enabled
    if json:
        all_kv = {}
        for kv in resp.values():
            all_kv.update(kv.keyVals)

        # Force set value to None
        for value in all_kv.values():
            value.value = None

        print(utils.json_dumps(
            {k: utils.thrift_to_dict(v) for k, v in all_kv.items()}))
        return

    rows = []
    db_bytes = 0
    num_keys = 0
    for area, publication in resp.items():
        keyVals = publication.keyVals
        num_keys += len(keyVals)
        area_str = "N/A" if area is None else area
        for key, value in sorted(keyVals.items(), key=lambda x: x[0]):
            # 32 bytes comes from version, ttlVersion, ttl and hash which are i64
            kv_size = 32 + len(key) + len(value.originatorId) + len(value.value)
            db_bytes += kv_size

            # Positive hashes get an explicit '+' so signs line up in hex
            hash_offset = "+" if value.hash > 0 else ""
            row = [
                key,
                value.originatorId,
                value.version,
                f"{hash_offset}{value.hash:x}",
                printing.sprint_bytes(kv_size),
                area_str,
            ]
            if ttl:
                ttlStr = (
                    "Inf"
                    if value.ttl == Consts.CONST_TTL_INF
                    else str(datetime.timedelta(milliseconds=value.ttl))
                )
                row.append(f"{ttlStr} - {value.ttlVersion}")
            rows.append(row)

    db_bytes_str = printing.sprint_bytes(db_bytes)
    caption = f"KvStore Data - {num_keys} keys, {db_bytes_str}"
    column_labels = ["Key", "Originator", "Ver", "Hash", "Size", "Area"]
    if ttl:
        column_labels = column_labels + ["TTL - Ver"]

    print(printing.render_horizontal_table(rows, column_labels, caption))
def print_adjs_table(adjs_map, enable_color, neigh=None, interface=None):
    '''
    print adjacencies

    :param adjacencies as list of dict
    '''
    column_labels = [
        'Neighbor', 'Local Interface', 'Remote Interface', 'Metric',
        'Weight', 'Adj Label', 'NextHop-v4', 'NextHop-v6', 'Uptime'
    ]
    output = []
    # tracks whether the (neigh, interface) filter matched any adjacency
    adj_found = False
    for node, val in sorted(adjs_map.items()):
        # report overloaded status in color
        is_overloaded = val['overloaded']
        # report overloaded status in color
        overload_color = 'red' if is_overloaded else 'green'
        overload_status = click.style('{}'.format(is_overloaded),
                                      fg=overload_color)
        # caption combines node name, version (if present), label and
        # overload state; implicit string-literal concatenation below
        cap = "{}'s adjacencies, version: {}, Node Label: {}, " \
              "Overloaded?: {}".format(
                  node,
                  val['version'] if 'version' in val else 'N/A',
                  val['node_label'],
                  overload_status if enable_color
                  else ('TRUE' if is_overloaded else 'FALSE'))

        # horizontal adj table for a node
        rows = []
        seg = ''
        for adj in sorted(val['adjacencies'],
                          key=lambda adj: adj['otherNodeName']):
            # filter if set
            # NOTE(review): filtering only applies when BOTH neigh and
            # interface are given; a single filter argument is ignored
            if neigh is not None and interface is not None:
                if neigh == adj['otherNodeName'] and interface == adj['ifName']:
                    adj_found = True
                else:
                    continue

            overload_status = click.style('Overloaded', fg='red')
            metric = ((overload_status if enable_color else 'OVERLOADED')
                      if adj['isOverloaded'] else adj['metric'])
            uptime = time_since(adj['timestamp']) if adj['timestamp'] else ''

            rows.append([
                adj['otherNodeName'], adj['ifName'], adj['otherIfName'],
                metric, adj['weight'], adj['adjLabel'], adj['nextHopV4'],
                adj['nextHopV6'], uptime
            ])
            # re-rendered on every iteration so nodes whose adjacencies are
            # all filtered out keep the empty-string segment
            seg = printing.render_horizontal_table(rows, column_labels,
                                                   tablefmt='plain')
        output.append([cap, seg])

    if neigh is not None and interface is not None and not adj_found:
        print('Adjacency with {} {} is not formed.'.format(neigh, interface))
        return

    print(printing.render_vertical_table(output))
def print_allocations_table(alloc_str):
    ''' print static allocations '''

    parsed = deserialize_thrift_object(alloc_str,
                                       alloc_types.StaticAllocation)
    table_rows = [
        [node_name, sprint_prefix(node_prefix)]
        for node_name, node_prefix in parsed.nodePrefixes.items()
    ]
    print(printing.render_horizontal_table(table_rows, ['Node', 'Prefix']))
def run(self):
    """Fetch advertised prefixes from the client and print them as a table."""
    resp = self.client.view_prefix()
    rows = [
        (
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix(entry.prefix),
        )
        for entry in resp.prefixes
    ]
    print('\n', printing.render_horizontal_table(rows, ['Type', 'Prefix']))
    print()
def compare_route_db(routes_a, routes_b, sources, enable_color, quiet=False):
    """
    Compare two route databases and report differences.

    :return (bool, [str]): True with no messages when the tables match;
        otherwise False plus captions for each mismatch (captions are only
        collected in quiet mode, otherwise the detail tables are printed).
    """
    extra_in_a = routes_difference(routes_a, routes_b)
    extra_in_b = routes_difference(routes_b, routes_a)
    diff_prefixes = prefixes_with_different_nexthops(routes_a, routes_b)

    # return error type
    error_msg = []

    def _banner(label, bg):
        # Colored banner when requested, plain text otherwise
        if enable_color:
            click.echo(click.style(label, bg=bg, fg="black"))
        else:
            click.echo(label)

    # if all good, then return early
    if not extra_in_a and not extra_in_b and not diff_prefixes:
        if not quiet:
            _banner("PASS", "green")
            print("{} and {} routing table match".format(*sources))
        return True, error_msg

    # Something failed.. report it
    if not quiet:
        _banner("FAIL", "red")
        print("{} and {} routing table do not match".format(*sources))

    if extra_in_a:
        caption = "Routes in {} but not in {}".format(*sources)
        if quiet:
            error_msg.append(caption)
        else:
            print_unicast_routes(caption, extra_in_a)

    if extra_in_b:
        caption = "Routes in {} but not in {}".format(*reversed(sources))
        if quiet:
            error_msg.append(caption)
        else:
            print_unicast_routes(caption, extra_in_b)

    if diff_prefixes:
        caption = "Prefixes have different nexthops in {} and {}".format(
            *sources)
        if quiet:
            error_msg.append(caption)
        else:
            rows = [
                [prefix, ", ".join(lhs), ", ".join(rhs)]
                for prefix, lhs, rhs in diff_prefixes
            ]
            print(printing.render_horizontal_table(
                rows, ["Prefix"] + sources, caption=caption))

    return False, error_msg
def sprint_interface_db_delta(new_intf_db, old_intf_db):
    '''
    Print delta between new and old interface db

    @param new_intf_db: InterfaceDatabase.bunch
    @param old_intf_db: InterfaceDatabase.bunch
    '''
    assert(new_intf_db is not None)
    assert(old_intf_db is not None)

    new_intfs = set(new_intf_db.interfaces.keys())
    old_intfs = set(old_intf_db.interfaces.keys())

    lines = []

    # Brand-new interfaces: dump every attribute as "" --> value
    for intf_name in new_intfs - old_intfs:
        lines.append('INTERFACE_ADDED: {}\n'.format(intf_name))
        intf = new_intf_db.interfaces[intf_name]
        added_rows = [[k, "", "-->", intf.get(k)] for k in sorted(intf.keys())]
        lines.append(printing.render_horizontal_table(added_rows,
                                                      tablefmt='plain'))

    # Removed interfaces: name only, no attribute table
    for intf_name in old_intfs - new_intfs:
        lines.append('INTERFACE_REMOVED: {}\n'.format(intf_name))

    # Interfaces present in both: show only the attributes that changed
    for intf_name in new_intfs & old_intfs:
        new_intf = new_intf_db.interfaces[intf_name]
        old_intf = old_intf_db.interfaces[intf_name]
        if new_intf == old_intf:
            continue
        lines.append('INTERFACE_UPDATED: {}'.format(intf_name))
        changed_rows = [
            [k, old_intf.get(k), "-->", new_intf.get(k)]
            for k in sorted(new_intf.keys())
            if old_intf.get(k) != new_intf.get(k)
        ]
        lines.append(printing.render_horizontal_table(changed_rows,
                                                      tablefmt='plain'))

    return lines
def print_kvstore_nodes(
    self,
    connected_nodes: Set[str],
    prefix_keys: openr_types.Publication,
    host_id: str,
    node_area: "Optional[Dict[str, str]]" = None,
) -> None:
    """
    Print kvstore nodes information. Their loopback and reachability
    information.
    """

    def _parse_loopback_addrs(addrs, value):
        # Accumulate loopback prefixes per node into addrs["v4"]/addrs["v6"]
        v4_addrs = addrs["v4"]
        v6_addrs = addrs["v6"]
        prefix_db = serializer.deserialize_thrift_object(
            value.value, openr_types.PrefixDatabase)

        for prefixEntry in prefix_db.prefixEntries:
            p = prefixEntry.prefix
            if prefixEntry.type != network_types.PrefixType.LOOPBACK:
                continue

            # 16-byte address with /128 -> IPv6 loopback
            if len(p.prefixAddress.addr) == 16 and p.prefixLength == 128:
                v6_addrs[prefix_db.thisNodeName] = ipnetwork.sprint_prefix(
                    p)

            # 4-byte address with /32 -> IPv4 loopback
            if len(p.prefixAddress.addr) == 4 and p.prefixLength == 32:
                v4_addrs[prefix_db.thisNodeName] = ipnetwork.sprint_prefix(
                    p)

    # Extract loopback addresses
    addrs = {"v4": {}, "v6": {}}
    self.iter_publication(addrs, prefix_keys, {"all"}, _parse_loopback_addrs)

    # Create rows to print
    rows = []
    for node in set(list(addrs["v4"].keys()) + list(addrs["v6"].keys())):
        # '*' marks the node we are connected to, '>' all others
        marker = "* " if node == host_id else "> "
        loopback_v4 = addrs["v4"].get(node, "N/A")
        loopback_v6 = addrs["v6"].get(node, "N/A")
        area_str = node_area.get(node, "N/A") if node_area is not None else "N/A"
        rows.append([
            f"{marker}{node}",
            loopback_v6,
            loopback_v4,
            "Reachable" if node in connected_nodes else "Unreachable",
            area_str,
        ])

    label = ["Node", "V6-Loopback", "V4-Loopback", "Status", "Area"]
    print(printing.render_horizontal_table(rows, label))
def print_links_table(cls, interfaces, caption=None, stylish=False):
    """
    @param interfaces: dict<interface-name, InterfaceDetail>
    @param caption: Caption to show on table name
    """
    header = ["Interface", "Status", "Metric Override", "Addresses"]
    table = printing.render_horizontal_table(
        cls.build_table_rows(interfaces, stylish), header, caption)
    print(table)
    print()
def print_counters(self, counters):
    ''' print the Fib counters '''

    host_id = utils.get_connected_node_name(self.client.host,
                                            self.lm_cmd_port)
    rows = [['{} : {}'.format(key, counters[key])] for key in counters]
    print(printing.render_horizontal_table(
        rows,
        caption='{}\'s Fib counters'.format(host_id),
        tablefmt='plain'))
    print()
def print_kvstore_keys(self, resp, ttl, json_fmt):
    """
    print keys from raw publication from KvStore

    :param resp kv_store_types.Publication: pub from kv store
    :param ttl bool: Show ttl value and version if True
    """

    # Force set value to None
    for value in resp.keyVals.values():
        value.value = None

    # Export in json format if enabled
    if json_fmt:
        print(utils.json_dumps(
            {k: utils.thrift_to_dict(v) for k, v in resp.keyVals.items()}))
        return

    rows = []
    for key, value in sorted(resp.keyVals.items(), key=lambda x: x[0]):
        # Positive hashes get an explicit '+' so signs line up in hex output
        hash_offset = "+" if value.hash > 0 else ""
        row = [
            key,
            value.originatorId,
            value.version,
            "{}{:x}".format(hash_offset, value.hash),
        ]
        if ttl:
            # Infinite TTL is displayed as "Inf"; otherwise HH:MM:SS
            if value.ttl != Consts.CONST_TTL_INF:
                ttlStr = str(datetime.timedelta(milliseconds=value.ttl))
            else:
                ttlStr = "Inf"
            row.append("{} - {}".format(ttlStr, value.ttlVersion))
        rows.append(row)

    caption = "Available keys in KvStore"
    column_labels = ["Key", "Originator", "Ver", "Hash"]
    if ttl:
        column_labels = column_labels + ["TTL - Ver"]

    print(printing.render_horizontal_table(rows, column_labels, caption))
def run(self, json):
    """Print the Open/R version, as JSON or as a plain two-row table."""
    openr_version = self.client.get_openr_version()

    if json:
        print(utils.json_dumps(utils.thrift_to_dict(openr_version)))
        return

    rows = [
        ['Current Version', ':', openr_version.version],
        ['Lowest Supported Version', ':',
         openr_version.lowestSupportedVersion],
    ]
    print(printing.render_horizontal_table(
        rows, column_labels=[], tablefmt='plain'))
def sprint_adj_db_delta(new_adj_db, old_adj_db):
    """ given serialized adjacency database, print neighbors delta as
        compared to the supplied global state

        :param new_adj_db lsdb_types.AdjacencyDatabase: latest from kv store
        :param old_adj_db lsdb_types.AdjacencyDatabase: last one we had

        :return [str]: list of string to be printed
    """
    strs = []

    # check for deltas between old and new
    # first check for changes in the adjacencies lists
    for change_type, old_adj, new_adj in find_adj_list_deltas(
            old_adj_db.adjacencies, new_adj_db.adjacencies):
        if change_type == "NEIGHBOR_DOWN":
            strs.append("{}: {} via {}".format(
                change_type, old_adj.otherNodeName, old_adj.ifName))
        if change_type == "NEIGHBOR_UP" or change_type == "NEIGHBOR_UPDATE":
            strs.append("{}: {} via {}\n{}".format(
                change_type, new_adj.otherNodeName, new_adj.ifName,
                sprint_adj_delta(old_adj, new_adj)))

    def _scalar_fields(db):
        # Shallow-copy the DB and drop the fields compared separately above
        fields = copy.copy(db).__dict__
        fields.pop("adjacencies", None)
        fields.pop("perfEvents", None)
        return fields

    # check for other adjDB changes
    old_db_dict = _scalar_fields(old_adj_db)
    new_db_dict = _scalar_fields(new_adj_db)
    if new_db_dict != old_db_dict:
        strs.append("ADJ_DB_UPDATE: {}".format(new_adj_db.thisNodeName))
        rows = [
            [k, old_db_dict.get(k), "-->", new_db_dict.get(k)]
            for k in sorted(new_db_dict.keys())
            if old_db_dict.get(k) != new_db_dict.get(k)
        ]
        strs.append(printing.render_horizontal_table(rows, tablefmt="plain"))

    return strs
def print_stats(self, stats_templates, counters):
    ''' Print in pretty format '''

    suffixes = ['60', '600', '3600', '0']

    def _lookup(key):
        # Missing (or falsy) counter entries render as 'N/A'
        entry = counters.get(key, None)
        return 'N/A' if not entry else entry.value

    for template in stats_templates:
        counters_rows = [
            [title, _lookup(key)] for title, key in template['counters']
        ]

        stats_cols = ['Stat', '1 min', '10 mins', '1 hour', 'All Time']
        stats_rows = [
            [title] + [_lookup('{}.{}'.format(key_prefix, s))
                       for s in suffixes]
            for title, key_prefix in template['stats']
        ]

        print('> {} '.format(template['title']))

        if counters_rows:
            print()
            print(printing.render_horizontal_table(
                counters_rows,
                tablefmt='plain',
            ).strip('\n'))
        if stats_rows:
            print()
            print(printing.render_horizontal_table(
                stats_rows,
                column_labels=stats_cols,
                tablefmt='simple',
            ).strip('\n'))
def _run(self, client: OpenrCtrl.Client, json: bool) -> None:
    """Print Open/R version and build information, as JSON or plain text."""
    openr_version = client.getOpenrVersion()
    build_info = client.getBuildInfo()

    if json:
        # Build info is only emitted when the package name is populated
        if build_info.buildPackageName:
            print(utils.json_dumps(utils.thrift_to_dict(build_info)))
        print(utils.json_dumps(utils.thrift_to_dict(openr_version)))
        return

    if build_info.buildPackageName:
        print("Build Information")
        print("  Built by: {}".format(build_info.buildUser))
        print("  Built on: {}".format(build_info.buildTime))
        print("  Built at: {}".format(build_info.buildHost))
        print("  Build path: {}".format(build_info.buildPath))
        print("  Package Name: {}".format(build_info.buildPackageName))
        print("  Package Version: {}".format(build_info.buildPackageVersion))
        print("  Package Release: {}".format(build_info.buildPackageRelease))
        print("  Build Revision: {}".format(build_info.buildRevision))
        print("  Build Upstream Revision: {}".format(
            build_info.buildUpstreamRevision))
        print("  Build Platform: {}".format(build_info.buildPlatform))
        print("  Build Rule: {} ({}, {}, {})".format(
            build_info.buildRule,
            build_info.buildType,
            build_info.buildTool,
            build_info.buildMode,
        ))

    rows = [
        ["Open Source Version", ":", openr_version.version],
        ["Lowest Supported Open Source Version", ":",
         openr_version.lowestSupportedVersion],
    ]
    print(printing.render_horizontal_table(
        rows, column_labels=[], tablefmt="plain"))
def _run(self, client: OpenrCtrl.Client) -> None:
    """Fetch advertised prefixes and print type, prefix and forwarding type."""
    rows = [
        (
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix(entry.prefix),
            ipnetwork.sprint_prefix_forwarding_type(entry.forwardingType),
        )
        for entry in client.getPrefixes()
    ]
    print(
        "\n",
        printing.render_horizontal_table(
            rows, ["Type", "Prefix", "Forwarding Type"]),
    )
    print()
def print_paths(self, paths):
    """
    Pretty-print computed paths; a path whose first element is truthy
    gets a " *" suffix in its caption.
    """
    if not paths:
        print("No paths are found!")
        return

    column_labels = ['Hop', 'NextHop Node', 'Interface', 'Metric',
                     'NextHop-v6']
    plural = 'path is' if len(paths) == 1 else 'paths are'
    print("{} {} found.".format(len(paths), plural))

    for idx, path in enumerate(paths):
        caption = "Path {}{}".format(idx + 1, " *" if path[0] else "")
        table = printing.render_horizontal_table(
            path[1], column_labels, caption=caption, tablefmt="plain")
        print(table)
        print()