def process_request(self):
    """
    Serve one PrefixManagerRequest from the REP socket and send back a
    PrefixManagerResponse.

    Supported commands:
      - ADD_PREFIXES: upsert each entry into self._prefix_map keyed by
        its stringified prefix; always responds success=True.
      - WITHDRAW_PREFIXES: remove entries that are present; success=True
        iff at least one prefix was actually removed.
      - GET_ALL_PREFIXES: respond with every known prefix entry.
    """
    req = self._prefix_mgr_server_socket.recv_thrift_obj(
        prefix_mgr_types.PrefixManagerRequest)

    # Commands are mutually exclusive; use elif so each request is
    # answered exactly once.
    if req.cmd == prefix_mgr_types.PrefixManagerCommand.ADD_PREFIXES:
        for prefix_entry in req.prefixes:
            self._prefix_map[sprint_prefix(prefix_entry.prefix)] = prefix_entry
        self._prefix_mgr_server_socket.send_thrift_obj(
            prefix_mgr_types.PrefixManagerResponse(success=True))
    elif req.cmd == prefix_mgr_types.PrefixManagerCommand.WITHDRAW_PREFIXES:
        success = False
        for prefix_entry in req.prefixes:
            prefix_str = sprint_prefix(prefix_entry.prefix)
            if prefix_str in self._prefix_map:
                del self._prefix_map[prefix_str]
                success = True
        self._prefix_mgr_server_socket.send_thrift_obj(
            prefix_mgr_types.PrefixManagerResponse(success=success))
    elif req.cmd == prefix_mgr_types.PrefixManagerCommand.GET_ALL_PREFIXES:
        resp = prefix_mgr_types.PrefixManagerResponse()
        # dict.values() is a lazy view in py3; materialize it so the
        # thrift serializer gets a real list.
        resp.prefixes = list(self._prefix_map.values())
        resp.success = True
        self._prefix_mgr_server_socket.send_thrift_obj(resp)
def _parse_nodes(rows, value):
    """
    Append one table row for the node described by *value* (a serialized
    PrefixDatabase): node name (marked '* ' if it is this host, '> '
    otherwise) followed by its most specific v6 and v4 loopback prefixes
    (or "N/A" when a family has none).
    """
    prefix_db = serializer.deserialize_thrift_object(
        value.value, lsdb_types.PrefixDatabase)
    marker = "* " if prefix_db.thisNodeName == host_id else "> "
    row = ["{}{}".format(marker, prefix_db.thisNodeName)]

    # v6 (16-byte addrs) sorted ahead of v4; the sort is stable, so
    # within a family the original advertisement order is kept.
    loopbacks = sorted(
        (e.prefix for e in prefix_db.prefixEntries
         if e.type == network_types.PrefixType.LOOPBACK),
        key=lambda pfx: len(pfx.prefixAddress.addr),
        reverse=True,
    )

    # Most specific (largest prefixLength) per family; strict '>' means
    # the first such prefix wins ties, matching the original scan.
    best = {16: None, 4: None}
    for pfx in loopbacks:
        family = len(pfx.prefixAddress.addr)
        if family in best and (
                best[family] is None
                or pfx.prefixLength > best[family].prefixLength):
            best[family] = pfx

    for family in (16, 4):
        chosen = best[family]
        row.append(ipnetwork.sprint_prefix(chosen) if chosen else "N/A")
    rows.append(row)
def _parse(loopback_set, prefix_db):
    """
    Collect v6 loopback addresses from *prefix_db* into *loopback_set*
    (strings without the '/len' suffix). PREFIX_ALLOCATOR entries that
    are not /128 get a trailing '1' appended to form a host address.
    """
    for entry in prefix_db.prefixEntries:
        # Only consider v6 address
        if len(entry.prefix.prefixAddress.addr) != 16:
            continue

        if entry.type == network_types.PrefixType.PREFIX_ALLOCATOR:
            # Parse PrefixAllocator address
            base = ipnetwork.sprint_prefix(entry.prefix).split("/")[0]
            if entry.prefix.prefixLength == 128:
                loopback_set.add(base)
            else:
                # TODO: we should ideally get address with last bit set
                # to 1. python3.6's ipaddress does this in one line;
                # no easy option with ipaddr.
                # NOTE: current usecase assumes the allocated prefix has
                # its last 16 bits set to 0, so appending '1' is valid.
                loopback_set.add(base + "1")
        elif entry.type == network_types.PrefixType.LOOPBACK:
            # Parse LOOPBACK address
            loopback_set.add(
                ipnetwork.sprint_prefix(entry.prefix).split("/")[0])
def __init__(self, zmq_ctx, url):
    """
    Bind a ZMQ REP socket at *url* and seed the prefix map.

    The map is keyed by stringified prefix -> prefix entry.
    NOTE(review): prefix_entry1/2/3 are free names (module globals or an
    enclosing scope) — confirm where they come from before refactoring.
    """
    self._prefix_mgr_server_socket = socket.Socket(zmq_ctx, zmq.REP)
    self._prefix_mgr_server_socket.bind(url)
    self._prefix_map = {
        sprint_prefix(entry.prefix): entry
        for entry in (prefix_entry1, prefix_entry2, prefix_entry3)
    }
def get_nexthop_nodes(
    self, route_db, dst_addr, cur_lpm_len, if2node, fib_routes, in_fib
):
    """
    Get the next hop nodes toward dst_addr from the node that owns
    route_db.

    Returns a list of [next_hop_node_name, ifName, metric, nh_addr]
    entries for every minimum-metric v6 path of the longest-prefix-match
    route. Returns an empty list when the LPM route is shorter than
    cur_lpm_len (i.e. the longest prefix is coming from the current
    node), which terminates the path search.

    Side effect: when in_fib is set and this node's fib_routes entry is
    still empty, it is extended in place with the FIB path for the LPM
    prefix.
    """
    next_hop_nodes = []
    # Truthiness of the (possibly empty) list: non-empty means the FIB
    # routes for this node were already fetched.
    is_initialized = fib_routes[route_db.thisNodeName]

    lpm_route = self.get_lpm_route(route_db, dst_addr)
    # Only continue while the match is at least as specific as what the
    # caller has already seen along the path.
    if lpm_route and lpm_route.prefix.prefixLength >= cur_lpm_len:
        if in_fib and not is_initialized:
            fib_routes[route_db.thisNodeName].extend(
                self.get_fib_path(
                    route_db.thisNodeName,
                    ipnetwork.sprint_prefix(lpm_route.prefix),
                    self.fib_agent_port,
                    self.timeout,
                )
            )

        min_cost = min(p.metric for p in lpm_route.paths)
        for path in [p for p in lpm_route.paths if p.metric == min_cost]:
            # 16-byte nexthop address => IPv6 only.
            if len(path.nextHop.addr) == 16:
                nh_addr = ipnetwork.sprint_addr(path.nextHop.addr)
                # Map (interface, nexthop addr) back to the neighbor
                # node's name.
                next_hop_node_name = if2node[route_db.thisNodeName][
                    (path.ifName, nh_addr)
                ]
                next_hop_nodes.append(
                    [next_hop_node_name, path.ifName, path.metric, nh_addr]
                )

    return next_hop_nodes
def _run(self, client: OpenrCtrl.Client):
    """
    Fetch the RIB policy from the Open/R controller and pretty-print it:
    validity TTL, then per-statement prefix match list and the
    set-weight action (default weight plus per-area and per-neighbor
    weights).
    """
    policy = None
    try:
        policy = client.getRibPolicy()
    except ctrl_types.OpenrError as e:
        print(f"Error: {e.message}", file=sys.stderr)
        return

    # Convert the prefixes to readable format (mutates the fetched
    # policy in place; we own this copy).
    assert policy is not None
    for stmt in policy.statements:
        if stmt.matcher.prefixes:
            stmt.matcher.prefixes = [
                ipnetwork.sprint_prefix(p) for p in stmt.matcher.prefixes
            ]

    # NOTE: no explicit effort to pretty-print the policy beyond the
    # fixed layout below.
    print("> RibPolicy")
    print(f" Validity: {policy.ttl_secs}s")
    for stmt in policy.statements:
        prefixes = stmt.matcher.prefixes or []
        # Fall back to a default-constructed weight action when unset.
        action = stmt.action.set_weight or ctrl_types.RibRouteActionWeight(
        )
        print(f" Statement: {stmt.name}")
        print(f" Prefix Match List: {', '.join(prefixes)}")
        print(" Action Set Weight:")
        print(f" Default: {action.default_weight}")
        print(" Area:")
        for area, weight in action.area_to_weight.items():
            print(f" {area}: {weight}")
        print(" Neighbor:")
        for neighbor, weight in action.neighbor_to_weight.items():
            print(f" {neighbor}: {weight}")
def print_unicast_routes(
    caption: str,
    unicast_routes: List[network_types.UnicastRoute],
    prefixes: List[str] = None,
):
    """
    Print unicast routes as a vertical table. When *prefixes* is given,
    only routes whose destination falls inside one of those networks are
    printed.
    """
    networks = [ipaddress.ip_network(p) for p in prefixes] if prefixes else None

    rows = []
    for route in unicast_routes:
        dest = ipnetwork.sprint_prefix(route.dest)
        if not ipnetwork.contain_any_prefix(dest, networks):
            continue
        hops = "\n".join(
            "via {}".format(ip_nexthop_to_str(nh))
            for nh in get_route_nexthops(route)
        )
        rows.append([dest, hops])

    print(printing.render_vertical_table(rows, caption=caption))
def _update(route_dict, route):
    """Fill *route_dict* with JSON-friendly fields of a unicast route."""
    route_dict["dest"] = ipnetwork.sprint_prefix(route.dest)
    route_dict["nextHops"] = [
        next_hop_thrift_to_dict(nh) for nh in route.nextHops
    ]
def sprint_prefixes_db_full(prefix_db, loopback_only=False):
    """
    Render a prefix database as a horizontal table string. IPV6 prefixes
    come before IPV4 prefixes.

    :param prefix_db: lsdb_types.PrefixDatabase to render
    :param loopback_only: when True, include only LOOPBACK entries
    :return: rendered table (str)
    """
    # One tuple-key sort == the original two stable sorts
    # (primary: addr bytes, secondary: prefix length).
    ordered = sorted(
        prefix_db.prefixEntries,
        key=lambda e: (e.prefix.prefixAddress.addr, e.prefix.prefixLength),
    )

    rows = []
    for entry in ordered:
        if loopback_only and entry.type is not network_types.PrefixType.LOOPBACK:
            continue
        rows.append([
            ipnetwork.sprint_prefix(entry.prefix),
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix_forwarding_type(entry.forwardingType),
        ])

    return printing.render_horizontal_table(
        rows, ["Prefix", "Client Type", "Forwarding Type"])
def _update(interface_info_dict, interface_info):
    """Add the interface's networks (as prefix strings) to the dict."""
    interface_info_dict["networks"] = [
        ipnetwork.sprint_prefix(pfx) for pfx in interface_info.networks
    ]
def print_routes_table(route_db, prefixes=None):
    """
    Print the routes from the Decision/Fib module as a vertical table,
    optionally restricted to destinations inside *prefixes*.
    """
    networks = [ipaddress.ip_network(p) for p in prefixes] if prefixes else None

    rows = []
    ordered = sorted(
        route_db.routes, key=lambda r: r.prefix.prefixAddress.addr)
    for route in ordered:
        prefix_str = ipnetwork.sprint_prefix(route.prefix)
        if not ipnetwork.contain_any_prefix(prefix_str, networks):
            continue
        hops = "\n".join(
            "via {}%{} metric {}".format(
                ipnetwork.sprint_addr(path.nextHop.addr),
                path.ifName,
                path.metric,
            )
            for path in route.paths
        )
        rows.append((prefix_str, hops))

    caption = "Routes for {}".format(route_db.thisNodeName)
    if not rows:
        rows.append(["No routes found."])
    print(printing.render_vertical_table(rows, caption=caption))
def _update(route_dict, route):
    """Fill *route_dict* with a route's prefix string and path dicts."""
    route_dict["prefix"] = ipnetwork.sprint_prefix(route.prefix)
    route_dict["paths"] = [path_to_dict(p) for p in route.paths]
def get_lpm_route(self, route_db, dst_addr):
    """
    Find the route whose prefix is the longest match for dst_addr.

    Raises if two matching routes share the same (current best) prefix
    length, since the routing table should never contain duplicates.
    """
    target = IPAddress(dst_addr)
    best_route = None
    best_len = -1
    for route in route_db.routes:
        if not IPNetwork(ipnetwork.sprint_prefix(route.prefix)).Contains(
                target):
            continue
        plen = route.prefix.prefixLength
        if plen == best_len:
            raise Exception('Duplicate prefix found in routing table {}'
                            .format(ipnetwork.sprint_prefix(route.prefix)))
        if plen > best_len:
            best_route, best_len = route, plen
    return best_route
def print_allocations_table(alloc_str):
    """Print static node -> prefix allocations as a table."""
    allocations = deserialize_thrift_object(
        alloc_str, alloc_types.StaticAllocation)
    rows = [
        [node, ipnetwork.sprint_prefix(prefix)]
        for node, prefix in allocations.nodePrefixes.items()
    ]
    print(printing.render_horizontal_table(rows, ["Node", "Prefix"]))
def _parse_loopback_addrs(addrs, value):
    """
    Record a node's host loopbacks into addrs["v6"] / addrs["v4"],
    keyed by node name. Only exact host prefixes count: /128 for v6
    (16-byte addr) and /32 for v4 (4-byte addr).
    """
    prefix_db = serializer.deserialize_thrift_object(
        value.value, lsdb_types.PrefixDatabase
    )
    node = prefix_db.thisNodeName
    for entry in prefix_db.prefixEntries:
        if entry.type != network_types.PrefixType.LOOPBACK:
            continue
        pfx = entry.prefix
        addr_len = len(pfx.prefixAddress.addr)
        if addr_len == 16 and pfx.prefixLength == 128:
            addrs["v6"][node] = ipnetwork.sprint_prefix(pfx)
        elif addr_len == 4 and pfx.prefixLength == 32:
            addrs["v4"][node] = ipnetwork.sprint_prefix(pfx)
def _update(prefix_entry_dict, prefix_entry):
    # Only addrs need string conversion, so we update just these keys;
    # data is rendered via str() here.
    prefix_entry_dict['prefix'] = ipnetwork.sprint_prefix(prefix_entry.prefix)
    prefix_entry_dict['data'] = str(prefix_entry.data)
def _update(prefix_entry_dict, prefix_entry):
    # Only addrs need string conversion, so we update just these keys;
    # data is passed through unchanged.
    prefix_entry_dict["prefix"] = ipnetwork.sprint_prefix(prefix_entry.prefix)
    prefix_entry_dict["data"] = prefix_entry.data
def run(self):
    """Fetch advertised prefixes and print a Type/Prefix table."""
    resp = self.client.view_prefix()
    rows = [
        (
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix(entry.prefix),
        )
        for entry in resp.prefixes
    ]
    print('\n', printing.render_horizontal_table(rows, ['Type', 'Prefix']))
    print()
def get_lpm_route(self, route_db, dst_addr):
    """
    Find the unicast route with the longest prefix match for dst_addr.

    Raises on two matching routes of equal (current best) prefix length,
    which would mean a duplicate prefix in the routing table.
    """
    target = ipaddress.ip_address(dst_addr)
    best = None
    best_len = -1
    for route in route_db.unicastRoutes:
        network = ipaddress.ip_network(ipnetwork.sprint_prefix(route.dest))
        if target not in network:
            continue
        plen = route.dest.prefixLength
        if plen == best_len:
            raise Exception(
                "Duplicate prefix found in routing table {}".format(
                    ipnetwork.sprint_prefix(route.dest)))
        if plen > best_len:
            best = route
            best_len = plen
    return best
def build_unicast_route(
    route: object,
    filter_for_networks: Optional[List[Union[ipaddress.IPv4Network,
                                             ipaddress.IPv6Network]]] = None,
) -> Optional[Tuple[str, List[str]]]:
    """
    Build a (dest, nexthops) pair for a unicast route.

    :param route: thrift route object with .dest and nexthops
    :param filter_for_networks: when given, routes whose destination is
        not contained in any of these networks are skipped
    :return: (dest prefix string, list of nexthop strings), or None when
        the route is filtered out
    """
    dest = ipnetwork.sprint_prefix(route.dest)
    # Fix: the return annotation must be Optional — this early exit
    # returns None for filtered-out routes.
    if filter_for_networks and not ipnetwork.contain_any_prefix(
            dest, filter_for_networks):
        return None
    nexthops = [ip_nexthop_to_str(nh) for nh in get_route_nexthops(route)]
    return dest, nexthops
def _update(interface_info_dict, interface_info):
    """Fill *interface_info_dict* with stringified interface addresses."""
    # TO BE DEPRECATED SOON: v4Addrs and v6LinkLocalAddrs
    v4 = [ipnetwork.sprint_addr(a.addr) for a in interface_info.v4Addrs]
    v6_ll = [
        ipnetwork.sprint_addr(a.addr)
        for a in interface_info.v6LinkLocalAddrs
    ]
    nets = [ipnetwork.sprint_prefix(p) for p in interface_info.networks]
    interface_info_dict.update({
        'v4Addrs': v4,
        'v6LinkLocalAddrs': v6_ll,
        'networks': nets,
    })
def _parse_nodes(rows, value):
    """
    Append one row for the node in *value*: the marked node name ('* '
    for this host, '> ' otherwise) followed by all its loopback prefixes,
    v6 (longer addr bytes) listed before v4.
    """
    prefix_db = serializer.deserialize_thrift_object(
        value.value, lsdb_types.PrefixDatabase)
    marker = '* ' if prefix_db.thisNodeName == host_id else '> '
    row = ["{}{}".format(marker, prefix_db.thisNodeName)]
    loopbacks = sorted(
        (e.prefix for e in prefix_db.prefixEntries
         if e.type == lsdb_types.PrefixType.LOOPBACK),
        key=lambda pfx: len(pfx.prefixAddress.addr),
        reverse=True,
    )
    row.extend(ipnetwork.sprint_prefix(pfx) for pfx in loopbacks)
    rows.append(row)
def print_ip_prefixes_filtered(
    self,
    ip_prefixes: List[network_types.IpPrefix],
    prefixes_filter: Optional[List[str]] = None,
    element_prefix: str = ">",
    element_suffix: str = "",
) -> None:
    """
    Print prefixes. If specified, only the exact-match subset of
    prefixes_filter will be printed.

    :param ip_prefixes: prefixes to print
    :param prefixes_filter: optional prefixes to print (exact match)
    :param element_prefix: starting prefix for each item (string)
    :param element_suffix: ending/terminator for each item (string)
    """
    allowed = (
        [ipaddress.ip_network(p) for p in prefixes_filter]
        if prefixes_filter
        else None
    )

    rows = []
    for ip_prefix in ip_prefixes:
        text = ipnetwork.sprint_prefix(ip_prefix)
        # Exact network match against the filter (not containment).
        if allowed is not None and ipaddress.ip_network(text) not in allowed:
            continue
        rows.append([text])

    print(
        printing.render_vertical_table(
            rows,
            element_prefix=element_prefix,
            element_suffix=element_suffix,
            timestamp=True,
        )
    )
def get_fib_path(self, src, dst_prefix, fib_agent_port, timeout):
    """
    Ask src's FIB agent for its route table and return the nexthops of
    the route whose destination equals *dst_prefix*. Returns [] when the
    source has no loopback address, the agent is unreachable, or no
    route matches (best-effort lookup by design).
    """
    src_addr = self.get_loopback_addr(src)
    if src_addr is None:
        return []
    try:
        client = utils.get_fib_agent_client(src_addr, fib_agent_port, timeout)
        routes = client.getRouteTableByClient(client.client_id)
    except Exception:
        # Deliberate best-effort: any agent failure means "no path".
        return []
    matches = (
        route.nexthops for route in routes
        if ipnetwork.sprint_prefix(route.dest) == dst_prefix
    )
    return next(matches, [])
def _run(self, client: OpenrCtrl.Client) -> None:
    """Fetch prefixes from the controller and print a summary table."""
    rows = [
        (
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix(entry.prefix),
            ipnetwork.sprint_prefix_forwarding_type(entry.forwardingType),
        )
        for entry in client.getPrefixes()
    ]
    print(
        "\n",
        printing.render_horizontal_table(
            rows, ["Type", "Prefix", "Forwarding Type"]),
    )
    print()
def print_routes(caption, routes, prefixes=None):
    """
    Print routes as a vertical table, optionally restricted to
    destinations contained in *prefixes*.
    """
    networks = [ipaddress.ip_network(p) for p in prefixes] if prefixes else None

    rows = []
    for route in routes:
        dest = ipnetwork.sprint_prefix(route.dest)
        if not ipnetwork.contain_any_prefix(dest, networks):
            continue
        hops = '\n'.join(
            "via {}".format(ip_nexthop_to_str(nh))
            for nh in route.nexthops
        )
        rows.append((dest, hops))

    print(printing.render_vertical_table(rows, caption=caption))
def run(self):
    """
    Fetch advertised prefixes and print them with type, forwarding type
    and whether each entry is ephemeral.
    """
    resp = self.client.view_prefix()
    rows = [
        (
            ipnetwork.sprint_prefix_type(entry.type),
            ipnetwork.sprint_prefix(entry.prefix),
            ipnetwork.sprint_prefix_forwarding_type(entry.forwardingType),
            ipnetwork.sprint_prefix_is_ephemeral(entry),
        )
        for entry in resp.prefixes
    ]
    print(
        "\n",
        printing.render_horizontal_table(
            rows, ["Type", "Prefix", "Forwarding Type", "Ephemeral"]),
    )
    print()
def get_routes_json(host, client, routes, prefixes=None):
    """
    Build a JSON-friendly dict of routes for *host*/*client*, optionally
    restricted to destinations contained in *prefixes*.
    """
    networks = [ipaddress.ip_network(p) for p in prefixes] if prefixes else None

    data = {"host": host, "client": client, "routes": []}
    for route in routes:
        dest = ipnetwork.sprint_prefix(route.dest)
        if not ipnetwork.contain_any_prefix(dest, networks):
            continue
        data["routes"].append({
            "dest": dest,
            "nexthops": [ip_nexthop_to_str(nh) for nh in route.nexthops],
        })
    return data
def get_route_as_dict(routes):
    """
    Convert a routeDb into a dict of routes in string format.

    Thrift objects are not hashable, so stringified prefixes and sorted
    stringified nexthops give us something we can hash and diff.

    :param routes: list of network_types.UnicastRoute (structured routes)
    :returns: dict of {prefix(str): [nexthop(str), ...]}
    :rtype: dict
    """
    out = {}
    for route in routes:
        key = ipnetwork.sprint_prefix(route.dest)
        out[key] = sorted(
            ip_nexthop_to_str(nh, True) for nh in get_route_nexthops(route)
        )
    return out
def _run(self, client: OpenrCtrl.Client, *args, **kwargs):
    """
    Fetch the RIB policy from the Open/R controller and pretty-print it:
    validity TTL, then per-statement prefix/tag match lists and the
    set-weight action (default weight plus per-area and per-neighbor
    weights).
    """
    policy = None
    try:
        policy = client.getRibPolicy()
    except ctrl_types.OpenrError as e:
        print("Error: ", str(e), "\nSystem standard error: ", sys.stderr)
        return

    # Convert the prefixes to readable format (mutates the fetched
    # policy in place; we own this copy).
    assert policy is not None
    for stmt in policy.statements:
        if stmt.matcher.prefixes:
            stmt.matcher.prefixes = [
                ipnetwork.sprint_prefix(p)
                # pyre-fixme[16]: `Optional` has no attribute `__iter__`.
                for p in stmt.matcher.prefixes
            ]

    # NOTE: no explicit effort to pretty-print the policy beyond the
    # fixed layout below.
    print("> RibPolicy")
    print(f" Validity: {policy.ttl_secs}s")
    for stmt in policy.statements:
        prefixes = stmt.matcher.prefixes or []
        tags = stmt.matcher.tags or []
        # Fall back to a default-constructed weight action when unset.
        action = stmt.action.set_weight or ctrl_types.RibRouteActionWeight(
        )
        print(f" Statement: {stmt.name}")
        if prefixes:
            # pyre-fixme[6]: Expected `Iterable[str]` for 1st param but got
            # `Union[List[typing.Any], List[network_types.IpPrefix]]`.
            print(f" Prefix Match List: {', '.join(prefixes)}")
        if tags:
            print(f" Tags Match List: {', '.join(tags)}")
        print(" Action Set Weight:")
        print(f" Default: {action.default_weight}")
        print(" Area:")
        for area, weight in action.area_to_weight.items():
            print(f" {area}: {weight}")
        print(" Neighbor:")
        for neighbor, weight in action.neighbor_to_weight.items():
            print(f" {neighbor}: {weight}")