def forwarding_address_of(self, src, dst):
    """Return the forwarding address to use for a (src, dst) pair.

    With a src node, this is the private 'link-local' address of the
    src-dst link; without one, it is a 'public' IP belonging to dst.

    :param src: the source node of the link towards the FA, possibly null
    :param dst: the node owning the forwarding address
    :return: forwarding address (str) or None if no compatible
             address was found
    """
    # A src node means we want the private IP stored on the src->dst
    # edge; otherwise any edge leaving dst carries a usable public IP.
    if src:
        u, v, key = src, dst, 'dst_address'
    else:
        u, v, key = dst, self.graph.neighbors(dst)[0], 'src_address'
    try:
        properties = self.graph[u][v]
    except KeyError:
        log.error('%s-%s not found in graph when resolving '
                  'forwarding address of (%s,%s)', u, v, src, dst)
        return None
    try:
        return properties[key]
    except KeyError:
        log.error('%s not found in the properties of edge %s-%s '
                  'when resolving forwarding address of (%s, %s)\n%s',
                  key, u, v, src, dst, properties)
        return None
def combine_ranges(self, n, s):
    """Try to merge the lb,ub interval of node n into node s.

    :param n: the node being merged
    :param s: the successor absorbing it
    :return: the merged (lb, ub) tuple, or None when the resulting
             range would be empty
    """
    node, succ = self.node(n), self.node(s)
    cost = self._p.default_cost(n, s)
    # Shift n's bounds by the SPT cost towards s, then intersect
    # them with s's own bounds.
    new_ub = min(node.ub - cost, succ.ub)
    new_lb = max(node.lb - cost, succ.lb)
    # Neither branch below should ever trigger, as propagation is
    # supposed to prevent LB changes; log loudly if they do.
    if new_lb > succ.lb:
        log.error('Merging %s into %s resulted in a LB increase from '
                  '%s to %s (%ss LB: %s, spt cost: %s)',
                  n, s, succ.lb, new_lb, n, node.lb, cost)
    elif new_lb < succ.lb:
        log.error('Merging %s into %s resulted in a LB decrease from '
                  '%s to %s (%ss LB: %s, spt cost: %s)',
                  n, s, succ.lb, new_lb, n, node.lb, cost)
    # An empty interval means the merge is unfeasible
    if not self.valid_range(s, new_lb, new_ub):
        log.debug('Merging %s into %s would lead to bounds of '
                  ']%s, %s[, aborting', n, s, new_lb, new_ub)
        return None
    return new_lb, new_ub
def need_root():
    """Ensure that the program is run as root, exiting otherwise."""
    uid = os.getuid()
    if uid:  # any non-zero uid is not root
        log.error('%s: Must be run as root!', sys.argv[0])
        sys.exit(1)
def __read_private_ips(self, filename):
    """Parse the JSON file binding routers to their private IPs.

    The file's top-level values are dicts describing one broadcast
    domain each, mapping router-id -> private ip (or list of ips).

    :param filename: path to the JSON binding file
    :return: (router_private_address, ip_to_bd) where
             router_private_address[rid][peer] is the list of private
             IPs rid uses towards peer, and ip_to_bd maps each private
             ip to the other routers of its broadcast domain.
             Both are empty on parse/read failure.
    """
    router_private_address = defaultdict(dict)
    ip_to_bd = defaultdict(list)
    try:
        with open(filename, 'r') as f:
            private_address_binding = json.load(f)
            for subnets in private_address_binding.itervalues():
                # All router ids present in this broadcast domain
                sub = subnets.keys()
                for rid, ip in subnets.iteritems():
                    # Allow a single private address given as a bare
                    # string instead of a list
                    if not is_container(ip):
                        ip = [ip]
                    # Record the private-address adjacency of rid
                    # towards every other router of the domain
                    other = sub[:]
                    other.remove(rid)
                    for s in other:
                        router_private_address[rid][s] = ip
                    for i in ip:
                        # Register the broadcast domain for each ip
                        # NOTE(review): this binds the ip to the *other*
                        # routers only (rid excluded) and overwrites any
                        # previous binding for i — confirm intended
                        ip_to_bd[i] = other
    except ValueError as e:
        # Malformed JSON: discard any partially-built state
        log.error('Incorrect private IP addresses binding file')
        log.error(str(e))
        ip_to_bd.clear()
        router_private_address.clear()
    except IOError as e:
        # Missing/unreadable file is non-fatal: return empty maps
        log.warning('Cannot read private address file')
        ip_to_bd.clear()
        router_private_address.clear()
    return router_private_address, ip_to_bd
def parse(lsa_prop):
    """Instantiate the Link subclass matching a link property dict.

    :param lsa_prop: a property dict holding the link type, id,
                     data and metric
    :return: a new Link subclass instance, or None when no subclass
             declares the given link type
    """
    for subcls in Link.__subclasses__():
        if subcls.TYPE == lsa_prop[LINK_TYPE]:
            return subcls(lsa_prop[LINKID],
                          lsa_prop[LINK_DATA],
                          lsa_prop[METRIC])
    # Fix: the two adjacent literals previously collapsed to
    # "Couldnt parse ..." (missing apostrophe)
    log.error("Couldn't parse the link %s", lsa_prop)
    return None
def __init__(self):
    """Set up the LSA databases and start the processing thread.

    Reads the private address bindings from the configuration file,
    initializes the (empty) LSA stores and spawns the thread that
    consumes self.queue via self.process_lsa.
    """
    self.private_address_network = ip_network(CFG.get(DEFAULTSECT,
                                                      'private_net'))
    try:
        # Build router-id -> list of private IPs from the binding file
        with open(CFG.get(DEFAULTSECT, 'private_ips'), 'r') as f:
            self.private_address_binding = json.load(f)
            self.router_private_address = {}
            for subnets in self.private_address_binding.itervalues():
                for rid, ip in subnets.iteritems():
                    try:
                        iplist = self.router_private_address[rid]
                    except KeyError:
                        iplist = self.router_private_address[rid] = []
                    iplist.append(ip)
    # NOTE(review): broad catch — treats *any* failure (missing file,
    # bad JSON, missing config key) as an empty binding
    except Exception as e:
        log.error('Incorrect private IP addresses binding file')
        log.error(str(e))
        self.private_address_binding = {}
        self.router_private_address = {}
    self.last_line = ''
    self.transaction = None
    self.graph = DiGraph()
    self.routers = {}  # router-id : lsa
    self.networks = {}  # DR IP : lsa
    self.ext_networks = {}  # (router-id, dest) : lsa
    self.controllers = defaultdict(list)  # controller nr : ip_list
    self.listener = {}
    self.keep_running = True
    self.queue = Queue()
    # Consumes LSAs pushed on self.queue until keep_running is cleared
    self.processing_thread = Thread(target=self.process_lsa,
                                    name="lsa_processing_thread")
    self.processing_thread.start()
def forwarding_address_of(self, src, dst):
    """Return the forwarding address for a src, dst pair.

    If src is specified, return the private 'link-local' address of
    the src-dst link, otherwise return a 'public' IP belonging to dst.

    :param src: the source node of the link towards the FA, possibly null
    :param dst: the node owning the forwarding address
    :return: forwarding address (str) or None if no compatible
             address was found
    """
    if src:
        # Private address: it lives on the src->dst edge
        u, v, key = src, dst, 'dst_address'
    else:
        # Public address: any edge out of dst will do
        u, v, key = dst, self.graph.neighbors(dst)[0], 'src_address'
    try:
        attrs = self.graph[u][v]
    except KeyError:
        log.error('%s-%s not found in graph when resolving '
                  'forwarding address of (%s,%s)', u, v, src, dst)
        return None
    try:
        return attrs[key]
    except KeyError:
        log.error('%s not found in the properties of edge %s-%s '
                  'when resolving forwarding address of (%s, %s)\n%s',
                  key, u, v, src, dst, attrs)
        return None
def create_ns(self):
    """Create this network namespace, removing any stale one first.

    Side effect: invokes 'ip netns add' through _netns.
    """
    # BUG FIX: os.listdir() returns bare entry names, so the previous
    # check `' %s ' % self.name in os.listdir(NSDIR)` looked for a
    # space-padded name and could never match, meaning stale
    # namespaces were never deleted. Compare the bare name instead.
    if os.path.exists(NSDIR) and self.name in os.listdir(NSDIR):
        self.delete()
    err = _netns('add', self.name)
    if err != 0:
        log.error('Failed to create namespace %s', self.name)
    else:
        log.debug('Created namespace %s', self.name)
def parse(lsa_prop):
    """Instantiate the Link subclass matching a link property dict.

    :param lsa_prop: a property dict holding the link type, id,
                     data and metric
    :return: a new Link subclass instance, or None when no subclass
             declares the given link type
    """
    for subcls in Link.__subclasses__():
        if subcls.TYPE == lsa_prop[LINK_TYPE]:
            return subcls(lsa_prop[LINKID],
                          lsa_prop[LINK_DATA],
                          lsa_prop[METRIC])
    # Fix: the adjacent literals 'Couldn''t ...' collapsed to
    # "Couldnt ..." (missing apostrophe)
    log.error("Couldn't parse the link %s", lsa_prop)
    return None
def create_link(self):
    """
    Create a veth link between the source and the destination ports
    """
    cmd = ['ip', 'link', 'add', self.src.id,
           'type', 'veth',
           'peer', 'name', self.dst.id]
    log.debug('Creating link: %s', cmd)
    # A non-zero return code from ip(8) is fatal
    if subprocess.call(cmd) != 0:
        log.error('Failed to create veth link: %s', cmd)
        sys.exit(1)
def parse(lsa_header, lsa_prop):
    """
    Create a new LSA based on the property dicts given
    :param lsa_header: an LSAHeader instance
    :param lsa_prop: a property dictionary
    :return: a new LSA instance, or None when the LSA type is unknown
    """
    for subcls in LSA.__subclasses__():
        if subcls.TYPE == lsa_header.lsa_type:
            return subcls.parse(lsa_header, lsa_prop)
    # BUG FIX: '%S' is not a valid %-format specifier and raised a
    # formatting error whenever this branch was hit; also restore the
    # apostrophe the adjacent literals dropped ("Couldnt")
    log.error("Couldn't parse the LSA type %s [%s]",
              lsa_header.lsa_type, lsa_prop)
    return None
def require_cmd(cmd, help_str=None):
    """
    Ensures that a command is available in $PATH
    :param cmd: the command to test
    :param help_str: an optional help string to display if cmd is not found
    """
    # An absolute (or relative) path to an existing file is good enough
    if os.path.isfile(cmd):
        return
    # Otherwise look the command up in every $PATH component
    candidates = (os.path.join(d.strip('"'), cmd)
                  for d in os.environ["PATH"].split(os.pathsep))
    if any(os.path.isfile(exe) for exe in candidates):
        return
    log.error('[%s] is not available in $PATH', cmd)
    if help_str:
        log.error(help_str)
    sys.exit(1)
def forwarding_address_of(self, src, dst):
    """Return the forwarding address for a src, dst pair.

    If src is specified, return the private 'link-local' address of
    the src-dst link, otherwise return a 'public' IP belonging to dst.

    :param src: the source node of the link towards the FA, possibly null
    :param dst: the node owning the forwarding address
    :return: forwarding address (str) or None if no compatible
             address was found
    """
    if src:
        # Private address: read it straight off the src->dst edge
        try:
            return self.graph[src][dst]['dst_address']
        except KeyError as e:
            log.error("Couldn't resolve local forwarding of %s-%s, missing"
                      " key %s", src, dst, e)
    else:
        # Public address: smallest src_address among dst's outgoing links
        try:
            candidates = [self.graph[dst][succ].get('src_address', None)
                          for succ in self.graph.successors_iter(dst)]
            candidates = [a for a in candidates if a is not None]
            if candidates:
                return min(candidates)
            log.error("Cannot use %s as nexthop as it has no physical "
                      "link to other routers!", dst)
        except KeyError:
            log.error("Couldn't find nexthop %s when resolving global "
                      "forwarding address", dst)
    return None
def install_route(self, network, points, advertize):
    """
    Install and advertize a fibbing route
    :param network: the network prefix to attract
    :param points: a list of (address, metric) of points
    :param advertize: whether to advertize the new attraction points
                      right away (forwarded to route.append)
    """
    try:
        net = ip_network(network)
    except ValueError:
        log.error('%s is not a valid IP network', network)
        return
    try:
        points_data = [(ip_address(addr), int(metric))
                       for addr, metric in points]
    except ValueError as e:
        log.error("Failed to parse an attraction point: %s", e)
        return
    # Retrieve existing route if any, otherwise register an empty one
    try:
        route = self.routes[net]
    except KeyError:
        route = FibbingRoute(net, [])
        self.routes[net] = route
        self.route_mappings[net] = set()
    # Get used nodes mapping for this prefix
    mappings = self.route_mappings[net]
    # Increase node count if needed so each attraction point can get
    # its own (unused) node
    size = len(route) + len(points)
    while size > len(self.nodes):
        self.add_node()
    # Get available node list for this prefix
    nodes = [n for n in self.nodes.values() if n not in mappings]
    # Generate attraction points, consuming one free node each
    attraction_points = [AttractionPoint(addr, metric, nodes.pop())
                         for addr, metric in points_data]
    # Update used nodes mapping
    for p in attraction_points:
        mappings.add(p.node)
    # Advertize them
    route.append(attraction_points, advertize)
def combine_ranges(self, n, s):
    """Attempt to combine the lb,ub interval between the two nodes"""
    merged, into = self.node(n), self.node(s)
    spt_cost = self._p.default_cost(n, s)
    # Translate n's interval by the SPT cost and intersect with s's
    upper = min(merged.ub - spt_cost, into.ub)
    lower = max(merged.lb - spt_cost, into.lb)
    # Propagation should make both branches below unreachable;
    # log these errors which should never happen
    if lower > into.lb:
        log.error('Merging %s into %s resulted in a LB increase from '
                  '%s to %s (%ss LB: %s, spt cost: %s)',
                  n, s, into.lb, lower, n, merged.lb, spt_cost)
    elif lower < into.lb:
        log.error('Merging %s into %s resulted in a LB decrease from '
                  '%s to %s (%ss LB: %s, spt cost: %s)',
                  n, s, into.lb, lower, n, merged.lb, spt_cost)
    # Report unfeasible merge
    if not self.valid_range(s, lower, upper):
        log.debug('Merging %s into %s would lead to bounds of '
                  ']%s, %s[, aborting', n, s, lower, upper)
        return None
    return lower, upper
def solvable(dag, graph):
    """Check that the given DAG can be embedded in the graph"""
    def edge_present(u, v):
        # Indexing raises KeyError when the edge is absent
        try:
            graph[u][v]
            return True
        except KeyError:
            return False

    for u, v in dag.edges_iter():
        if edge_present(u, v):
            continue
        log.error("Cannot satisfy the DAG "
                  " as (%s, %s) is not in the IGP graph", u, v)
        log.error("Available edges: %s", graph.edges())
        log.error("DAG: %s", dag.edges())
        return False
    return True
def solvable(self, dest, dag):
    """Check that the DAG for dest only uses edges of the IGP graph.

    :param dest: the destination this DAG applies to (for logging)
    :param dag: the requirement DAG to validate
    :return: True if every DAG edge exists in self.igp_graph
    """
    for u, v in dag.edges_iter():
        try:
            self.igp_graph[u][v]
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt and masked unrelated errors;
        # only a missing edge (KeyError) means the DAG is unsolvable,
        # matching the module-level solvable() helpers.
        except KeyError:
            logger.error('Cannot satisfy the DAG for dest %s '
                         ' as (%s, %s) is not in the IGP graph',
                         dest, u, v)
            logger.error('Available edges: %s', self.igp_graph.edges())
            logger.error('DAG: %s', dag.edges())
            return False
    return True
def solvable(dag, graph):
    """Check that the given DAG can be embedded in the graph"""
    for u, v in dag.edges_iter():
        try:
            graph[u][v]
            continue
        except KeyError:
            pass
        # The edge is missing from the IGP graph: report and give up
        log.error('Cannot satisfy the DAG '
                  ' as (%s, %s) is not in the IGP graph', u, v)
        log.error('Available edges: %s', graph.edges())
        log.error('DAG: %s', dag.edges())
        return False
    return True
def dump_threads():
    """
    Shouldn't be used except for debugging purpose (e.g. find deadlocks)
    """
    import traceback
    log.error("*** STACKTRACE - START ***")
    lines = []
    for thread_id, frame in sys._current_frames().items():
        lines.append("\n# ThreadID: %s" % thread_id)
        for filename, lineno, name, line in traceback.extract_stack(frame):
            lines.append('File: "%s", line %d, in %s'
                         % (filename, lineno, name))
            if line:
                lines.append(" %s" % (line.strip()))
    for entry in lines:
        log.error(entry.strip('\n'))
    log.error("*** STACKTRACE - END ***")
def check_fwd_dags(fwd_req, topo, lsas, solver):
    """Verify that the computed fake LSAs enforce the fwd requirements.

    Simulates the IGP shortest paths on a copy of the topology
    augmented with the fake nodes described by the LSAs, then compares
    the resulting forwarding DAG of every destination against its
    requirement DAG.

    :param fwd_req: dict destination -> required forwarding DAG
    :param topo: the IGP topology graph (a copy is modified, not topo)
    :param lsas: the fake LSAs produced by the solver
    :param solver: provides new_edge_metric for edges towards new dests
    :return: True if all requirements are enforced
    """
    correct = True
    topo = topo.copy()
    # Check that the topology/dag contain the destinations, otherwise add it
    for dest, dag in fwd_req.iteritems():
        dest_in_dag = dest in dag
        dest_in_graph = dest in topo
        if not dest_in_dag or not dest_in_graph:
            if not dest_in_dag:
                sinks = ssu.find_sink(dag)
            else:
                sinks = dag.predecessors(dest)
            for s in sinks:
                if not dest_in_dag:
                    dag.add_edge(s, dest)
                if not dest_in_graph:
                    topo.add_edge(s, dest, metric=solver.new_edge_metric)
    # fake_nodes: (advertising node, fake id, dest) -> real next-hop
    fake_nodes = {}
    # local_fake_nodes: (node, dest) -> list of forced next-hops
    local_fake_nodes = collections.defaultdict(list)
    f_ids = set()
    for lsa in lsas:
        if lsa.cost > 0:
            if not lsa.node:  # We added a pure fake LSA
                continue
            # Globally-visible fake node: splice it into the topology
            # so that it attracts traffic via its advertised cost
            f_id = '__f_%s_%s_%s' % (lsa.node, lsa.nh, lsa.dest)
            f_ids.add(f_id)
            fake_nodes[(lsa.node, f_id, lsa.dest)] = lsa.nh
            cost = topo[lsa.node][lsa.nh]['metric']
            topo.add_edge(lsa.node, f_id, metric=cost)
            topo.add_edge(f_id, lsa.dest, metric=lsa.cost - cost)
            log.debug('Added a globally-visible fake node: '
                      '%s - %s - %s - %s - %s [-> %s]',
                      lsa.node, cost, f_id, lsa.cost - cost,
                      lsa.dest, lsa.nh)
        else:
            local_fake_nodes[(lsa.node, lsa.dest)].append(lsa.nh)
            log.debug('Added a locally-visible fake node: %s -> %s',
                      lsa.node, lsa.nh)
    spt = ssu.all_shortest_paths(topo, metric='metric')
    for dest, req_dag in fwd_req.iteritems():
        log.info('Validating requirements for dest %s', dest)
        dag = IGPGraph()
        # Rebuild the effective forwarding DAG from the shortest paths
        # of every node that is not itself a destination
        for n in filter(lambda n: n not in fwd_req, topo):
            if n in f_ids:
                continue
            log.debug('Checking paths of %s', n)
            for p in spt[n][0][dest]:
                log.debug('Reported path: %s', p)
                for u, v in zip(p[:-1], p[1:]):
                    try:
                        # Are we using a globally-visible fake node?
                        nh = fake_nodes[(u, v, dest)]
                        log.debug('%s uses the globally-visible fake node %s '
                                  'to get to %s', u, v, nh)
                        dag.add_edge(u, nh)  # Replace by correct next-hop
                        break
                    except KeyError:
                        # Are we using a locally-visible one?
                        nh = local_fake_nodes[(u, dest)]
                        if nh:
                            log.debug('%s uses a locally-visible fake node'
                                      ' to get to %s', u, nh)
                            for h in nh:
                                dag.add_edge(u, h)  # Replace by true nh
                            break
                        else:
                            dag.add_edge(u, v)  # Otherwise follow the SP
        # Now that we have the current fwing dag, compare to the requirements
        for n in req_dag:
            successors = set(dag.successors(n))
            req_succ = set(req_dag.successors(n))
            if successors ^ req_succ:
                log.error('The successor sets for node %s differ, '
                          'REQ: %s, CURRENT: %s', n, req_succ, successors)
                correct = False
            predecessors = set(dag.predecessors(n))
            req_pred = set(req_dag.predecessors(n))
            # Also requires to have a non-null successor sets to take into
            # account the fact that the destination will have new adjacencies
            # through fake nodes
            if predecessors ^ req_pred and successors:
                log.error('The predecessors sets for %s differ, '
                          'REQ: %s, CURRENT: %s', n, req_pred, predecessors)
                correct = False
    if correct:
        log.info('All forwarding requirements are enforced!')
    return correct
""" As this module has an hard dependency against mininet and on the availability of some commands, perform the import at the top-level in order to make the relevant checks once, at import time. Furthermore, all these classes will be import anyway at some point when instantiating a Fibbing lab ... """ try: import mininet # noqa except ImportError as e: from fibbingnode import log import sys log.error('Failed to import mininet!') log.error('Using the mininetlib module requires mininet to be ' 'installed.\n' 'Visit www.mininet.org to learn how to do so.\n') sys.exit(1) PRIVATE_IP_KEY = '__fibbing_private_ips' CFG_KEY = '__fibbing_controller_config_key' BDOMAIN_KEY = '__fibbing_broadcast_domains' FIBBING_MIN_COST = 2 DEBUG_FLAG = False def get_logger(): import mininet.log as l l.setLogLevel('info') return l.lg
output = CFG.get(DEFAULTSECT, 'graph_loc') if os.path.exists(output): os.unlink(output) plt.savefig(output) plt.close() log.debug('Graph of %d nodes saved in %s', len(graph), output) except: pass # The draw_graph call will be remapped to 'nothing' if matplotlib (aka extra # packages) is not available try: from networkx import spring_layout, draw_networkx_edge_labels, draw import matplotlib.pyplot as plt except ImportError as e: log.error('Missing packages to draw the network') log.exception(e) draw_graph = lambda x: True def contract_graph(graph, nodes, into): """ Contract the graph :param graph: The graph to contract :param nodes: The set of nodes to contract into one :param into: The (new) node that should be the contraction """ edges = graph.edges(nodes, data=True) graph.add_edges_from(map(lambda x: (into, x[1], x[2]), edges)) graph.remove_nodes_from(nodes)
""" As this module has an hard dependency against mininet and on the availability of some commands, perform the import at the top-level in order to make the relevant checks once, at import time. Furthermore, all these classes will be import anyway at some point when instantiating a Fibbing lab ... """ try: import mininet # noqa except ImportError as e: from fibbingnode import log import sys log.error('Failed to import mininet!') log.error('Using the mininetlib module requires mininet to be ' 'installed.\n' 'Visit www.mininet.org to learn how to do so.\n') sys.exit(1) PRIVATE_IP_KEY = '__fibbing_private_ips' CFG_KEY = '__fibbing_controller_config_key' BDOMAIN_KEY = '__fibbing_broadcast_domains' FIBBING_MIN_COST = 2 FIBBING_DEFAULT_AREA = '0.0.0.0' DEBUG_FLAG = False def get_logger(): import mininet.log as l l.setLogLevel('info') return l.lg
def check_fwd_dags(fwd_req, topo, lsas, solver):
    """Verify that the computed fake LSAs enforce the fwd requirements.

    Simulates the IGP shortest paths on a copy of the topology
    augmented with the fake nodes described by the LSAs, then compares
    the resulting forwarding DAG of every destination against its
    requirement DAG.

    :param fwd_req: dict destination -> required forwarding DAG
    :param topo: the IGP topology graph (a copy is modified, not topo)
    :param lsas: the fake LSAs produced by the solver
    :param solver: provides new_edge_metric for edges towards new dests
    :return: True if all requirements are enforced
    """
    correct = True
    topo = topo.copy()
    # Check that the topology/dag contain the destinations, otherwise add it
    for dest, dag in fwd_req.iteritems():
        dest_in_dag = dest in dag
        dest_in_graph = dest in topo
        if not dest_in_dag or not dest_in_graph:
            if not dest_in_dag:
                sinks = ssu.find_sink(dag)
            else:
                sinks = dag.predecessors(dest)
            for s in sinks:
                if not dest_in_dag:
                    dag.add_edge(s, dest)
                if not dest_in_graph:
                    topo.add_edge(s, dest, metric=solver.new_edge_metric)
    # fake_nodes: (advertising node, fake id, dest) -> real next-hop
    fake_nodes = {}
    # local_fake_nodes: (node, dest) -> list of forced next-hops
    local_fake_nodes = collections.defaultdict(list)
    f_ids = set()
    for lsa in lsas:
        if lsa.cost > 0:
            # BUG FIX (consistency with the sibling implementation):
            # a pure fake LSA has no advertising node, and indexing
            # topo[lsa.node] with it would raise; skip it instead.
            if not lsa.node:  # We added a pure fake LSA
                continue
            f_id = '__f_%s_%s_%s' % (lsa.node, lsa.nh, lsa.dest)
            f_ids.add(f_id)
            fake_nodes[(lsa.node, f_id, lsa.dest)] = lsa.nh
            cost = topo[lsa.node][lsa.nh]['metric']
            topo.add_edge(lsa.node, f_id, metric=cost)
            topo.add_edge(f_id, lsa.dest, metric=lsa.cost - cost)
            log.debug('Added a globally-visible fake node: '
                      '%s - %s - %s - %s - %s [-> %s]',
                      lsa.node, cost, f_id, lsa.cost - cost,
                      lsa.dest, lsa.nh)
        else:
            local_fake_nodes[(lsa.node, lsa.dest)].append(lsa.nh)
            log.debug('Added a locally-visible fake node: %s -> %s',
                      lsa.node, lsa.nh)
    spt = ssu.all_shortest_paths(topo, metric='metric')
    for dest, req_dag in fwd_req.iteritems():
        log.info('Validating requirements for dest %s', dest)
        dag = IGPGraph()
        # Rebuild the effective forwarding DAG from the shortest paths
        # of every node that is not itself a destination
        for n in filter(lambda n: n not in fwd_req, topo):
            if n in f_ids:
                continue
            log.debug('Checking paths of %s', n)
            for p in spt[n][0][dest]:
                log.debug('Reported path: %s', p)
                for u, v in zip(p[:-1], p[1:]):
                    try:
                        # Are we using a globally-visible fake node?
                        nh = fake_nodes[(u, v, dest)]
                        log.debug('%s uses the globally-visible fake node %s '
                                  'to get to %s', u, v, nh)
                        dag.add_edge(u, nh)  # Replace by correct next-hop
                        break
                    except KeyError:
                        # Are we using a locally-visible one?
                        nh = local_fake_nodes[(u, dest)]
                        if nh:
                            log.debug('%s uses a locally-visible fake node'
                                      ' to get to %s', u, nh)
                            for h in nh:
                                dag.add_edge(u, h)  # Replace by true nh
                            break
                        else:
                            dag.add_edge(u, v)  # Otherwise follow the SP
        # Now that we have the current fwing dag, compare to the requirements
        for n in req_dag:
            successors = set(dag.successors(n))
            req_succ = set(req_dag.successors(n))
            if successors ^ req_succ:
                log.error('The successor sets for node %s differ, '
                          'REQ: %s, CURRENT: %s', n, req_succ, successors)
                correct = False
            predecessors = set(dag.predecessors(n))
            req_pred = set(req_dag.predecessors(n))
            # Also requires to have a non-null successor sets to take into
            # account the fact that the destination will have new adjacencies
            # through fake nodes
            if predecessors ^ req_pred and successors:
                log.error('The predecessors sets for %s differ, '
                          'REQ: %s, CURRENT: %s', n, req_pred, predecessors)
                correct = False
    if correct:
        log.info('All forwarding requirements are enforced!')
    return correct