class ARPTable(object):
  MAX_ENTRIES = 1024
  MAX_PENDING = 5
  MAX_PENDING_TIME = 2

  def __init__(self):
    self.by_ip = {}    # ip -> entry
    self.pending = []  # Packets waiting to be sent (ip,(send_args))
    self.timer = Timer(self.MAX_PENDING_TIME, self._timer_proc)

  def __str__(self):
    sending = set(x for x, y in self.pending)
    r = []
    for ip, e in sorted(self.by_ip.items()):
      m = "%-15s %16s" % (ip, e.mac)
      if ip in sending:
        m += " p"
      r.append(m)
    return "\n".join(r)

  def add_entry(self, ip, mac=None):
    """
    Add an entry

    The entry can't already exist.
    It will definitely exist after returning.
    """
    assert ip not in self.by_ip
    if len(self.by_ip) >= self.MAX_ENTRIES:
      # Sloppy, but simple.
      # Get ones with lowest age
      entries = sorted(self.by_ip.values(), key=lambda entry: entry.age)
      del entries[self.MAX_ENTRIES:]
      self.by_ip = {e.ip: e for e in entries}
    new_entry = ARPEntry(ip=ip, mac=mac)
    self.by_ip[ip] = new_entry
    return new_entry

  def send(self, eth_packet, router_ip=None, src_eth=None, src_ip=None,
           send_function=None):
    """
    Try to send a packet

    eth_packet is an ethernet object.

    src_eth is the source for any ARPs sent.
    src_ip is the source for any ARPs sent.
    If the above two are not specified, they are taken from eth_packet.

    send_function is a function which takes raw bytes to send.
    If send_function is unset, it is taken from a send_function attribute.
    """
    if send_function is None:
      send_function = self.send_function

    ipp = eth_packet.find("ipv4")
    if not ipp and eth_packet.type == eth_packet.IP_TYPE:
      if isinstance(eth_packet.payload, bytes):
        # Hm!  Try harder...
        ipp = pkt.ipv4(raw=eth_packet.payload)

    if not ipp or eth_packet.dst.is_multicast:
      send_function(eth_packet.pack())
      return
    if ipp.dstip == pkt.IPV4.IP_BROADCAST:
      #ipp.dstip = router_ip # Not sure what this was about
      eth_packet.dst = pkt.ETHERNET.ETHER_BROADCAST
      send_function(eth_packet.pack())
      return
    if ipp.dstip.is_multicast:
      eth_packet.dst = ipp.dstip.multicast_ethernet_address
      send_function(eth_packet.pack())
      return

    if src_ip is None:
      src_ip = ipp.srcip
    if src_eth is None:
      src_eth = eth_packet.src

    if router_ip is not None:
      dstip = router_ip
    else:
      dstip = ipp.dstip

    if dstip not in self.by_ip:
      self.add_entry(dstip)

    e = self.by_ip[dstip]
    if e.maybe_refresh():
      # Send ARP
      self._send_arp(dstip, src_eth, src_ip, send_function)

    if e.mac is not None:
      eth_packet.dst = e.mac
      send_function(eth_packet.pack())
    else:
      if len(self.pending) < self.MAX_PENDING:
        self.pending.append((dstip, (eth_packet, router_ip, src_eth, src_ip,
                                     send_function)))

  def _timer_proc(self):
    # We just blow away all the entries every interval, so on average, they
    # live for half the interval.
    del self.pending[:]

  def __del__(self):
    if self.timer:
      self.timer.cancel()
      self.timer = None

  def _send_arp(self, dstip, src_eth, src_ip, send_function):
    r = pkt.arp()
    r.opcode = r.REQUEST
    r.hwdst = pkt.ETHERNET.ETHER_BROADCAST
    r.protodst = dstip
    r.hwsrc = src_eth
    r.protosrc = src_ip
    e = pkt.ethernet(type=pkt.ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
    e.payload = r
    log.debug("Sending ARP for %s", dstip)
    send_function(e.pack())

  def rx_arp_reply(self, arp):
    assert arp.opcode == arp.REPLY
    self.rx_arp(arp)

  def rx_arp(self, arp):
    if arp.protosrc not in self.by_ip:
      self.add_entry(mac=arp.hwsrc, ip=arp.protosrc)
    else:
      self.by_ip[arp.protosrc].confirm(arp.hwsrc)

    # Send any pending packets
    for index, (ip, args) in reversed(list(enumerate(self.pending))):
      if ip == arp.protosrc:
        del self.pending[index]
        log.debug("ARP reply allows sending pending packet")
        self.send(*args)
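
# A minimal, self-contained sketch (plain Python, no POX imports) of the
# queue-and-flush pattern that ARPTable.send() and ARPTable.rx_arp() implement
# above: packets destined to an unresolved IP are parked in a bounded pending
# list and replayed once an ARP reply supplies the MAC. The names PendingQueue
# and deliver are illustrative only and are not part of the class above.
class PendingQueue(object):
  MAX_PENDING = 5

  def __init__(self):
    self.mac_for_ip = {}   # ip -> mac, stands in for ARPTable.by_ip
    self.pending = []      # (ip, packet) tuples awaiting resolution

  def send(self, ip, packet, deliver):
    mac = self.mac_for_ip.get(ip)
    if mac is not None:
      deliver(packet, mac)                 # resolved: send immediately
    elif len(self.pending) < self.MAX_PENDING:
      self.pending.append((ip, packet))    # unresolved: park it (bounded)

  def rx_arp_reply(self, ip, mac, deliver):
    self.mac_for_ip[ip] = mac
    # Walk backwards so deletions don't shift indices we still have to visit,
    # mirroring the reversed(enumerate(...)) loop in ARPTable.rx_arp().
    for index in reversed(range(len(self.pending))):
      pending_ip, packet = self.pending[index]
      if pending_ip == ip:
        del self.pending[index]
        deliver(packet, mac)

if __name__ == '__main__':
  q = PendingQueue()
  sent = []
  q.send('10.0.0.2', 'payload-1', lambda p, m: sent.append((p, m)))
  assert sent == [] and len(q.pending) == 1     # queued, no MAC known yet
  q.rx_arp_reply('10.0.0.2', '00:00:00:00:00:02',
                 lambda p, m: sent.append((p, m)))
  assert sent == [('payload-1', '00:00:00:00:00:02')]  # flushed on reply
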
class AbstractRemoteDomainManager(AbstractDomainManager):
  """
  Abstract class for different remote domain managers.

  Implements a polling mechanism for remote domains.
  """
  # Polling interval
  POLL_INTERVAL = 3
  """Polling interval"""
  # Request formats
  DEFAULT_DIFF_VALUE = False
  """Request formats"""

  def __init__(self, domain_name=None, adapters=None, **kwargs):
    """
    Init.

    :param domain_name: domain name
    :type domain_name: str
    :param adapters: config of adapters
    :type adapters: dict
    :param kwargs: optional params
    :type kwargs: dict
    :return: None
    """
    super(AbstractRemoteDomainManager, self).__init__(domain_name=domain_name,
                                                      adapters=adapters,
                                                      **kwargs)
    # Timer for polling function
    self.__timer = None
    if 'poll' in kwargs:
      self._poll = bool(kwargs['poll'])
    else:
      self._poll = False
    if 'diff' in kwargs:
      self._diff = bool(kwargs['diff'])
    else:
      self._diff = self.DEFAULT_DIFF_VALUE
    self.log.debug("Enforced configuration for %s: poll: %s, diff: %s"
                   % (self.__class__.__name__, self._poll, self._diff))

  @property
  def detected(self):
    """
    Return True if the Manager has detected the domain.

    :return: domain status
    :rtype: bool
    """
    return self._detected

  def get_domain_url(self):
    if isinstance(self.topoAdapter, AbstractRESTAdapter):
      return self.topoAdapter.URL

  ############################################################################
  # Abstract functions for component control
  ############################################################################

  def init(self, configurator, **kwargs):
    """
    Abstract function for component initialization.

    :param configurator: component configurator for configuring adapters
    :type configurator: :any:`ComponentConfigurator`
    :param kwargs: optional parameters
    :type kwargs: dict
    :return: None
    """
    # Load and initiate adapters using the initiate_adapters() template func
    self._load_adapters(configurator=configurator, **kwargs)
    # Skip straight to polling if it is enabled
    if not self._poll:
      # Try to request/parse/update Mininet topology
      if not self._detect_topology():
        self.log.warning("%s domain not confirmed during init!"
                         % self.domain_name)
      else:
        # Notify all components for topology change --> this event causes
        # the DoV updating
        self.raiseEventNoErrors(DomainChangedEvent,
                                domain=self.domain_name,
                                data=self.internal_topo,
                                cause=DomainChangedEvent.TYPE.DOMAIN_UP)
    else:
      self.log.info("Start polling %s domain..." % self.domain_name)
      self.start_polling(self.POLL_INTERVAL)

  def initiate_adapters(self, configurator):
    """
    Initiate Adapters for the DomainManager.

    Must be overridden in inherited classes.
    Follows the Factory Method design pattern.

    :param configurator: component configurator for configuring adapters
    :type configurator: :any:`ComponentConfigurator`
    :return: None
    """
    raise NotImplementedError

  def finit(self):
    """
    Abstract function for stopping the component.

    :return: None
    """
    self.stop_polling()
    super(AbstractRemoteDomainManager, self).finit()

  ############################################################################
  # Common functions for polling
  ############################################################################

  def start_polling(self, interval=1):
    """
    Initialize and start a Timer co-op task for polling.

    :param interval: polling period (default: 1)
    :type interval: int
    :return: None
    """
    if self.__timer:
      # Already timing
      return
    self.__timer = Timer(interval, self.poll, recurring=True, started=True,
                         selfStoppable=True)

  def restart_polling(self, interval=POLL_INTERVAL):
    """
    Reinitialize and start a Timer co-op task for polling.

    :param interval: polling period (default: 3)
    :type interval: int
    :return: None
    """
    if self.__timer:
      self.__timer.cancel()
    self.__timer = Timer(interval, self.poll, recurring=True, started=True,
                         selfStoppable=True)

  def stop_polling(self):
    """
    Stop the timer.

    :return: None
    """
    if self.__timer:
      self.__timer.cancel()
    self.__timer = None

  @property
  def polling(self):
    return self._poll

  def poll(self):
    """
    Poll the defined domain agent. Handle different connection errors and go
    to slow/rapid poll. When an agent is (re)detected, update the current
    resource information.

    :return: None
    """
    # If the domain is not detected
    if not self._detected:
      # Check whether the topology is reachable
      if self._detect_topology():
        # Domain is detected and topology is updated -> restart domain polling
        self.restart_polling()
        # Notify all components for topology change --> this event causes
        # the DoV updating
        self.raiseEventNoErrors(DomainChangedEvent,
                                domain=self.domain_name,
                                data=self.internal_topo,
                                cause=DomainChangedEvent.TYPE.DOMAIN_UP)
        return
    # If the domain has already been detected
    else:
      # Check whether the domain is still reachable
      changed = self.topoAdapter.check_topology_changed()
      # No changes
      if changed is False:
        # Nothing to do
        self.log.log(VERBOSE,
                     "Remote domain: %s has not changed!" % self.domain_name)
        return
      # Domain has changed
      elif isinstance(changed, NFFG):
        self.log.info("Remote domain: %s has changed! "
                      "Update global domain view..." % self.domain_name)
        self.log.debug("Save changed topology: %s" % changed)
        # Update the received new topo
        self.internal_topo = changed
        # Notify all components for topology change --> this event causes
        # the DoV updating
        self.raiseEventNoErrors(DomainChangedEvent,
                                domain=self.domain_name,
                                data=self.internal_topo,
                                cause=DomainChangedEvent.TYPE.DOMAIN_CHANGED)
        return
      # If changed is None something went wrong, probably the remote domain is
      # not reachable. Fall through to the detection handling below.
      elif changed is None:
        self.log.warning("Lost connection with %s agent! "
                         "Going to slow poll..." % self.domain_name)
        # Clear internal topology
        self.log.debug("Clear topology from domain: %s" % self.domain_name)
        self.internal_topo = None
        self.raiseEventNoErrors(DomainChangedEvent,
                                domain=self.domain_name,
                                cause=DomainChangedEvent.TYPE.DOMAIN_DOWN)
      else:
        self.log.warning(
          "Got unexpected return value from check_topology_changed(): %s"
          % type(changed))
        return
    # If this is the first call of poll()
    if self._detected is None:
      self.log.warning("Local agent in domain: %s is not detected! "
                       "Keep trying..." % self.domain_name)
      self._detected = False
    elif self._detected:
      # Detected before -> lost connection = big problem
      self._detected = False
      self.restart_polling()
    else:
      # No success, but not the first try -> keep trying silently
      pass

  ############################################################################
  # ESCAPE specific functions
  ############################################################################

  def install_nffg(self, nffg_part):
    """
    Install an :class:`NFFG` related to the specific domain.

    :raise: :any:`exceptions.NotImplementedError`
    :param nffg_part: NF-FG to be deployed
    :type nffg_part: :class:`NFFG`
    :return: status of whether the install process was successful
    :rtype: bool
    """
    raise NotImplementedError

  def clear_domain(self):
    """
    Clear the Domain according to the first received config.

    :raise: :any:`exceptions.NotImplementedError`
    :return: None
    """
    raise NotImplementedError
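
# A minimal, self-contained sketch of the detection state machine that poll()
# above drives: _detected starts as None (never tried), becomes False while
# the remote agent is unreachable, and True once detection succeeds; losing a
# detected domain falls back to False and restarts polling. DummyManager and
# probe are illustrative stand-ins for the manager and its adapter calls, not
# part of the ESCAPE API.
class DummyManager(object):
  def __init__(self, probe):
    self._detected = None  # None = never tried, False = unreachable, True = up
    self._probe = probe    # stands in for topoAdapter / _detect_topology()
    self.events = []

  def poll(self):
    if not self._detected:
      if self._probe():
        self._detected = True
        self.events.append('DOMAIN_UP')
        return
    else:
      if self._probe():
        return                        # still reachable, nothing to do
      self.events.append('DOMAIN_DOWN')
    if self._detected is None:
      self._detected = False          # first failed attempt: keep slow-polling
    elif self._detected:
      self._detected = False          # lost a detected domain: restart polling

if __name__ == '__main__':
  answers = iter([False, True, False])     # unreachable, then up, then lost
  m = DummyManager(lambda: next(answers))
  m.poll(); m.poll(); m.poll()
  assert m.events == ['DOMAIN_UP', 'DOMAIN_DOWN']
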
class MulticastPath(object):
    """Manages multicast route calculation and installation for a single pair
    of multicast group and multicast sender."""

    def __init__(self, src_ip, src_router_dpid, ingress_port, dst_mcast_address,
                 groupflow_manager, groupflow_trace_event=None):
        self.src_ip = src_ip
        self.ingress_port = ingress_port
        self.src_router_dpid = src_router_dpid
        self.dst_mcast_address = dst_mcast_address
        # self.path_tree_map[router_dpid] = Complete path from receiver router_dpid to src
        self.path_tree_map = defaultdict(lambda: None)
        self.weighted_topo_graph = []
        self.node_list = []            # List of all managed router dpids
        self.installed_node_list = []  # List of all router dpids with rules currently installed
        self.receivers = []            # Tuples of (router_dpid, port)
        self.groupflow_manager = groupflow_manager
        self.flow_cookie = self.groupflow_manager.get_new_mcast_group_cookie()
        self.calc_path_tree_dijkstras(groupflow_trace_event)
        self._last_flow_replacement_time = None
        self._flow_replacement_timer = None

    def calc_path_tree_dijkstras(self, groupflow_trace_event=None):
        """Calculates a shortest path tree from the group sender to all network
        switches, and caches the resulting tree.

        Note that this function does not install any flow modifications."""
        if groupflow_trace_event is not None:
            groupflow_trace_event.set_tree_calc_start_time(self.dst_mcast_address, self.src_ip)

        self._last_flow_replacement_time = time.time()

        self._calc_link_weights()

        nodes = set(self.node_list)
        edges = self.weighted_topo_graph
        graph = defaultdict(list)
        for src, dst, cost in edges:
            graph[src].append((cost, dst))

        path_tree_map = defaultdict(lambda: None)
        queue, seen = [(0, self.src_router_dpid, ())], set()
        while queue:
            (cost, node1, path) = heappop(queue)
            if node1 not in seen:
                seen.add(node1)
                path = (node1, path)
                path_tree_map[node1] = path

                for next_cost, node2 in graph.get(node1, ()):
                    if node2 not in seen:
                        new_path_cost = cost + next_cost
                        heappush(queue, (new_path_cost, node2, path))

        self.path_tree_map = path_tree_map

        log.debug('Calculated shortest path tree for source at router_dpid: '
                  + dpid_to_str(self.src_router_dpid))
        for node in self.path_tree_map:
            log.debug('Path to Node ' + dpid_to_str(node) + ': ' + str(self.path_tree_map[node]))

        if groupflow_trace_event is not None:
            groupflow_trace_event.set_tree_calc_end_time()

    def _calc_link_weights(self):
        """Calculates link weights for all links in the network to be used by
        calc_path_tree_dijkstras().

        The cost assigned to each link is based on the link's current
        utilization (as determined by the FlowTracker module), and the exact
        manner in which utilization is converted to a link weight is determined
        by groupflow_manager.link_weight_type. Valid options are
        LINK_WEIGHT_LINEAR and LINK_WEIGHT_EXPONENTIAL. Both options include a
        static weight which is always assigned to all links (determined by
        groupflow_manager.static_link_weight), and a dynamic weight which is
        based on the current utilization (determined by
        groupflow_manager.utilization_link_weight). Setting
        groupflow_manager.utilization_link_weight to 0 will always result in
        shortest hop routing.
        """
        curr_topo_graph = self.groupflow_manager.topology_graph
        self.node_list = list(self.groupflow_manager.node_set)
        weighted_topo_graph = []
        current_util = (core.openflow_flow_tracker.get_max_flow_utilization(self.flow_cookie)
                        / core.openflow_flow_tracker.link_max_bw)
        log.info('Current utilization of flow ' + str(self.flow_cookie) + ': '
                 + str(current_util * core.openflow_flow_tracker.link_max_bw) + ' Mbps')

        for edge in curr_topo_graph:
            output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]
            raw_link_util = core.openflow_flow_tracker.get_link_utilization_normalized(edge[0], output_port)
            link_util_mcast_flow = core.openflow_flow_tracker.get_flow_utilization_normalized(edge[0], output_port, self.flow_cookie)

            link_util = max(0, (raw_link_util * (1 - link_util_mcast_flow)))
            # link_util = raw_link_util  # Uncommenting this line will cause flows to reroute around their own traffic, good for testing

            # Current utilization here is doubled as a simple attempt to handle variability in flow rates
            if link_util + (current_util * 2) > 1:
                link_util = 1

            link_weight = 1

            if self.groupflow_manager.util_link_weight == 0:
                link_weight = self.groupflow_manager.static_link_weight
            else:
                if self.groupflow_manager.link_weight_type == LINK_WEIGHT_LINEAR:
                    if link_util >= 1:
                        link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()
                    else:
                        link_weight = min(self.groupflow_manager.static_link_weight
                                          + (self.groupflow_manager.util_link_weight * link_util),
                                          sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())
                elif self.groupflow_manager.link_weight_type == LINK_WEIGHT_EXPONENTIAL:
                    if link_util >= 1:
                        link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()
                    else:
                        link_weight = min(self.groupflow_manager.static_link_weight
                                          + (self.groupflow_manager.util_link_weight * ((1 / (1 - link_util)) - 1)),
                                          sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())

            log.debug('Router DPID: ' + dpid_to_str(edge[0])
                      + ' Port: ' + str(output_port)
                      + ' TotalUtil: ' + str(raw_link_util)
                      + ' FlowUtil: ' + str(link_util_mcast_flow)
                      + ' OtherFlowUtil: ' + str(link_util)
                      + ' Weight: ' + str(link_weight))

            weighted_topo_graph.append([edge[0], edge[1], link_weight])
        self.weighted_topo_graph = weighted_topo_graph

        log.debug('Calculated link weights for source at router_dpid: '
                  + dpid_to_str(self.src_router_dpid))
        for edge in self.weighted_topo_graph:
            log.debug(dpid_to_str(edge[0]) + ' -> ' + dpid_to_str(edge[1]) + ' W: ' + str(edge[2]))

    def install_openflow_rules(self, groupflow_trace_event=None):
        """Selects routes for active receivers from the cached shortest path
        tree, and installs/removes OpenFlow rules accordingly."""
        reception_state = self.groupflow_manager.get_reception_state(self.dst_mcast_address, self.src_ip)
        log.debug('Reception state for ' + str(self.dst_mcast_address) + ': ' + str(reception_state))
        outgoing_rules = defaultdict(lambda: None)

        if groupflow_trace_event is not None:
            groupflow_trace_event.set_route_processing_start_time(self.dst_mcast_address, self.src_ip)

        # Calculate the paths for the specific receivers that are currently
        # active from the previously calculated shortest path tree
        edges_to_install = []
        calculated_path_router_dpids = []
        for receiver in reception_state:
            if receiver[0] == self.src_router_dpid:
                continue
            if receiver[0] in calculated_path_router_dpids:
                continue

            # log.debug('Building path for receiver on router: ' + dpid_to_str(receiver[0]))
            receiver_path = self.path_tree_map[receiver[0]]
            log.debug('Receiver path for receiver ' +
                      str(receiver[0]) + ': ' + str(receiver_path))
            if receiver_path is None:
                log.warn('Path could not be determined for receiver '
                         + dpid_to_str(receiver[0]) + ' (network is not fully connected)')
                continue

            while receiver_path[1]:
                edges_to_install.append((receiver_path[1][0], receiver_path[0]))
                receiver_path = receiver_path[1]
            calculated_path_router_dpids.append(receiver[0])

        # Get rid of duplicates in the edge list (must be a more efficient way
        # to do this, find it eventually)
        edges_to_install = list(set(edges_to_install))
        if edges_to_install:
            # log.info('Installing edges:')
            for edge in edges_to_install:
                log.debug('Installing: ' + str(edge[0]) + ' -> ' + str(edge[1]))

        if groupflow_trace_event is not None:
            groupflow_trace_event.set_route_processing_end_time()
            groupflow_trace_event.set_flow_installation_start_time()

        for edge in edges_to_install:
            if edge[0] in outgoing_rules:
                # Add the output action to an existing rule if it has already been generated
                output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]
                outgoing_rules[edge[0]].actions.append(of.ofp_action_output(port=output_port))
                # log.debug('ER: Configured router ' + dpid_to_str(edge[0]) + ' to forward group '
                #           + str(self.dst_mcast_address) + ' to next router '
                #           + dpid_to_str(edge[1]) + ' over port: ' + str(output_port))
            else:
                # Otherwise, generate a new flow mod
                msg = of.ofp_flow_mod()
                msg.hard_timeout = 0
                msg.idle_timeout = 0
                if edge[0] in self.installed_node_list:
                    msg.command = of.OFPFC_MODIFY
                else:
                    msg.command = of.OFPFC_ADD
                msg.match.dl_type = 0x800  # IPV4
                msg.match.nw_dst = self.dst_mcast_address
                msg.match.nw_src = self.src_ip
                msg.cookie = self.flow_cookie
                output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]
                msg.actions.append(of.ofp_action_output(port=output_port))
                outgoing_rules[edge[0]] = msg
                # log.debug('NR: Configured router ' + dpid_to_str(edge[0]) + ' to forward group '
                #           + str(self.dst_mcast_address) + ' to next router '
                #           + dpid_to_str(edge[1]) + ' over port: ' + str(output_port))

        for receiver in reception_state:
            if receiver[0] in outgoing_rules:
                # Add the output action to an existing rule if it has already been generated
                output_port = receiver[1]
                outgoing_rules[receiver[0]].actions.append(of.ofp_action_output(port=output_port))
                # log.debug('ER: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group '
                #           + str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))
            else:
                # Otherwise, generate a new flow mod
                msg = of.ofp_flow_mod()
                msg.hard_timeout = 0
                msg.idle_timeout = 0
                if receiver[0] in self.installed_node_list:
                    msg.command = of.OFPFC_MODIFY
                else:
                    msg.command = of.OFPFC_ADD
                msg.cookie = self.flow_cookie
                msg.match.dl_type = 0x800  # IPV4
                msg.match.nw_dst = self.dst_mcast_address
                msg.match.nw_src = self.src_ip
                output_port = receiver[1]
                msg.actions.append(of.ofp_action_output(port=output_port))
                outgoing_rules[receiver[0]] = msg
                # log.debug('NR: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group '
                #           + str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))

        # Set up removal rules for any router no longer involved in this path
        for router_dpid in self.node_list:
            if router_dpid not in outgoing_rules and router_dpid in self.installed_node_list:
                msg = of.ofp_flow_mod()
                msg.cookie = self.flow_cookie
                msg.match.dl_type = 0x800  # IPV4
                msg.match.nw_dst = self.dst_mcast_address
                msg.match.nw_src = self.src_ip
                msg.command = of.OFPFC_DELETE
                outgoing_rules[router_dpid] = msg
                # log.debug('Removed rule on router ' + dpid_to_str(router_dpid)
                #           + ' for group ' + str(self.dst_mcast_address))

        for router_dpid in outgoing_rules:
            connection = core.openflow.getConnection(router_dpid)
            if connection is not None:
                connection.send(outgoing_rules[router_dpid])
                if outgoing_rules[router_dpid].command != of.OFPFC_DELETE:
                    self.installed_node_list.append(router_dpid)
                else:
                    self.installed_node_list.remove(router_dpid)
            else:
                log.warn('Could not get connection for router: ' + dpid_to_str(router_dpid))

        log.debug('New flows installed for Group: ' + str(self.dst_mcast_address)
                  + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))

        if (self.groupflow_manager.flow_replacement_mode == PERIODIC_FLOW_REPLACEMENT
                and self._flow_replacement_timer is None):
            log.debug('Starting flow replacement timer for Group: ' + str(self.dst_mcast_address)
                      + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))
            self._flow_replacement_timer = Timer(self.groupflow_manager.flow_replacement_interval,
                                                 self.update_flow_placement, recurring=True)

        if groupflow_trace_event is not None:
            groupflow_trace_event.set_flow_installation_end_time()
            core.groupflow_event_tracer.archive_trace_event(groupflow_trace_event)

    def remove_openflow_rules(self):
        """Removes all OpenFlow rules associated with this multicast group /
        sender pair. This should be used when the group has no active
        receivers."""
        log.info('Removing rules on all routers for Group: ' + str(self.dst_mcast_address)
                 + ' Source: ' + str(self.src_ip))
        for router_dpid in self.node_list:
            msg = of.ofp_flow_mod()
            msg.cookie = self.flow_cookie
            msg.match.dl_type = 0x800  # IPV4
            msg.match.nw_dst = self.dst_mcast_address
            msg.match.nw_src = self.src_ip
            msg.match.in_port = None
            msg.command = of.OFPFC_DELETE
            connection = core.openflow.getConnection(router_dpid)
            if connection is not None:
                connection.send(msg)
            else:
                log.warn('Could not get connection for router: ' + dpid_to_str(router_dpid))
        self.installed_node_list = []

        if self._flow_replacement_timer is not None:
            self._flow_replacement_timer.cancel()
            self._flow_replacement_timer = None

    def update_flow_placement(self, groupflow_trace_event=None):
        """Replaces the existing flows by recalculating the cached shortest
        path tree, and installing new OpenFlow rules."""
        self.calc_path_tree_dijkstras(groupflow_trace_event)
        self.install_openflow_rules(groupflow_trace_event)
        log.info('Replaced flows for Group: ' + str(self.dst_mcast_address)
                 + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))
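
# A minimal, self-contained sketch of the data structure produced by
# calc_path_tree_dijkstras() above: each entry of path_tree_map is a nested
# tuple (node, (predecessor, (..., ()))) ending at the source, and
# install_openflow_rules() unwinds it by following element [1] to recover the
# tree edges. The toy graph, path_tree() and path_edges() names are
# illustrative only and are not part of the module above.
from collections import defaultdict
from heapq import heappop, heappush

def path_tree(weighted_edges, src):
    graph = defaultdict(list)
    for u, v, cost in weighted_edges:
        graph[u].append((cost, v))
    tree = defaultdict(lambda: None)
    queue, seen = [(0, src, ())], set()
    while queue:
        cost, node, path = heappop(queue)
        if node in seen:
            continue
        seen.add(node)
        path = (node, path)        # prepend node onto the nested-tuple path
        tree[node] = path
        for next_cost, nxt in graph.get(node, ()):
            if nxt not in seen:
                heappush(queue, (cost + next_cost, nxt, path))
    return tree

def path_edges(path):
    """Unwind a nested-tuple path into (upstream, downstream) edges."""
    edges = []
    while path[1]:
        edges.append((path[1][0], path[0]))
        path = path[1]
    return edges

if __name__ == '__main__':
    # 1 -> 2 -> 3 costs 2, which beats the direct 1 -> 3 link costing 5
    edges = [(1, 2, 1), (2, 3, 1), (1, 3, 5)]
    tree = path_tree(edges, src=1)
    assert tree[3] == (3, (2, (1, ())))
    assert path_edges(tree[3]) == [(2, 3), (1, 2)]
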
class TestController(object):
  def __init__(self, connection):
    self.connected = True
    self.connection = connection
    self.counter = 0
    self.listeners = []

    log.debug('connection: ')
    log.debug(self.connection)

    self.listeners = connection.addListeners(self)

    def _send_random_flow():
      if not self.connected:
        return False

      log.debug('-- Sending random flow --')

      # Creates an open flow rule which should send PILO broadcast messages
      # to our handler
      delete_msg = of.ofp_flow_mod()
      delete_msg.command = of.OFPFC_DELETE
      delete_msg.match.priority = self.counter
      delete_msg.match.dl_type = pkt.ethernet.IP_TYPE
      delete_msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL
      delete_msg.match.nw_dst = IPAddr('100.100.100.100')
      self.connection.send(delete_msg)

      self.counter += 1

      random_flow = of.ofp_flow_mod()
      random_flow.priority = self.counter
      random_flow.match.dl_type = pkt.ethernet.IP_TYPE
      random_flow.match.nw_proto = pkt.ipv4.UDP_PROTOCOL
      random_flow.match.nw_dst = IPAddr('100.100.100.100')
      random_flow.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
      self.connection.send(random_flow)

    self.timer = Timer(10, _send_random_flow, recurring=True)
    _send_random_flow()

  def _handle_PacketIn(self, event):
    """
    Handles packet in messages from a remote pilo switch.
    """
    log.debug(' -- Received Packet in in test controller -- \n\n')
    log.debug(event)

    packet = event.parsed  # This is the parsed packet data.
    log.debug(packet)

    if not packet.parsed:
      log.warning("Ignoring incomplete packet")
      return

    packet_in = event.ofp  # The actual ofp_packet_in message.
    log.debug(packet_in)
    log.debug(' -- Received Packet in in test controller -- \n\n')

  def _handle_ConnectionDown(self, event):
    log.debug('test controller handle connection down')
    self.connected = False
    self.timer.cancel()
    self.connection.removeListeners(self.listeners)
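
# A minimal sketch of how a controller class like TestController is usually
# wired into POX: a module-level launch() registers for ConnectionUp and
# instantiates the controller once per switch connection. This assumes the
# standard pox.core / openflow event APIs; adjust the import path to wherever
# TestController actually lives.
from pox.core import core

def launch():
  def start_switch(event):
    # One TestController per newly connected switch
    core.getLogger().debug("Controlling %s" % (event.connection,))
    TestController(event.connection)
  core.openflow.addListenerByName("ConnectionUp", start_switch)
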