Example #1
    def SendFlowStatsReq(self):
        # Send flow stat requests to the first switch in the path of every active flow.

        del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated.

        if core.forwarding.ActFlows:
            log.debug("Sending Flow Stat Request to the first switch in the path of every active flow.")
            # Remove non-active flows before building the polling list
            core.forwarding.removeInactiveFlows()

            # log.debug("Before Sending Flow Stats messages the ActFlow list is:")
            for fl in core.forwarding.ActFlows:
                # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
                if (fl.used_path[0][0]) not in self.PolSwitches:
                    self.PolSwitches.append(fl.used_path[0][0])
        else:
            log.debug("No active flows at the moment, so not sending any flow stat requests")

        self.pol_counter += 1  # increment polling session number

        if self.PolSwitches:
            for con in core.openflow.connections:
                if dpid_to_str(con.dpid) in self.PolSwitches:
                    log.debug("SendingFlowStatsRequest to %s: ", dpid_to_str(con.dpid))
                    msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
                    con.send(msg)
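The method above only sends the stats requests; the replies arrive asynchronously as POX FlowStatsReceived events. Below is a minimal sketch of the companion pieces such a poller needs, assuming a standalone component; the handler name, the 5-second interval and the byte-count summary are illustrative, not taken from the project above.

# A minimal sketch (not the original project's code): pair a flow-stats poller
# like SendFlowStatsReq above with a FlowStatsReceived listener and a timer.
from pox.core import core
from pox.lib.util import dpid_to_str
from pox.lib.recoco import Timer
import pox.openflow.libopenflow_01 as of

log = core.getLogger()

def _handle_flow_stats (event):
    # event.stats is a list of ofp_flow_stats entries reported by one switch
    total_bytes = sum(f.byte_count for f in event.stats)
    log.debug("Switch %s reported %i flows, %i bytes total",
              dpid_to_str(event.connection.dpid), len(event.stats), total_bytes)

def launch ():
    core.openflow.addListenerByName("FlowStatsReceived", _handle_flow_stats)

    def _poll ():
        # Poll every connected switch (interval chosen only for illustration)
        for con in core.openflow.connections:
            con.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))

    Timer(5, _poll, recurring=True)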
Example #2
File: gephi.py Project: wty21cn/srp
    def _handle_LLDPUtil_LinkEvent(self, event):
        s1 = event.link.dpid1
        s2 = event.link.dpid2
        s1 = dpid_to_str(s1)
        s2 = dpid_to_str(s2)
        if s1 > s2:
            s1, s2 = s2, s1

        if s1 not in self.switches: return
        if s2 not in self.switches: return

        if event.added and (s1, s2) not in self.links:
            self.links.add((s1, s2))
            self.send(ae(s1, s2))

            # Do we have abandoned hosts?
            for h, s in self.hosts.iteritems():
                if s == s1:
                    self.send(ae(h, s1))
                elif s == s2:
                    self.send(ae(h, s2))

        elif event.removed and (s1, s2) in self.links:
            self.links.remove((s1, s2))
            self.send(de(s1, s2))
Example #3
        def forward (message = None):
            this_dpid = dpid_to_str(event.dpid)

            if packet.dst.is_multicast:
                print "Destination multicast, flooding the packet."
                flood()
                return
            else:
                log.debug("Got unicast packet for %s at %s (input port %d):",
                          packet.dst, dpid_to_str(event.dpid), event.port)
                key_map = ()
                try:
                    # Add your logic here
                    print "Inside add your logic"
                    print packet.find('tcp')
                    key_map = (this_dpid, packet.src, packet.dst, packet.find('tcp').dstport)
                    if not self.portmap.get(key_map):
                        key_map = (this_dpid, packet.src, packet.dst, packet.find('tcp').srcport)
                        if not self.portmap.get(key_map):
                            raise AttributeError

                    print "FOUND: " + str(key_map)
                    next_dpid = self.portmap[key_map]
                    install_fwdrule(event, packet, self.adjacency[this_dpid][next_dpid])

                except AttributeError:
                    log.debug("packet type has no transport ports, flooding")
                    print "FLOOD: " + str(key_map)
                    # flood and install the flow table entry for the flood
                    install_fwdrule(event, packet, of.OFPP_FLOOD)
  def handleUDP (self, event, protocol_udp):
      
      #set path for RTP:
      if self.RTSP_ENABLED:
        log.debug("Handling UDP | protocol_udp: %s"%protocol_udp)
        dpid = event.dpid
        portUDP = protocol_udp.dstport

        for i in dataModel[dpid_to_str(dpid)]["match"]["rtp"]:

          #because of pox errors:
          #getattr(a, 'property', 'default value')
          srcip = getattr(event.parsed.payload, 'srcip', '')         
          dstip = getattr(event.parsed.payload, 'dstip', '')
          log.debug("UDP | srcip: %s | dstip: %s"%(srcip, dstip))

          if (i["ipsrc"]==srcip)and(i["ipdst"]==dstip):
            if portUDP not in i["dstport"]:
              i["dstport"].append(portUDP)
              self.connection[dpid_to_str(dpid)].send( of.ofp_flow_mod( action=of.ofp_action_output( port=i["outport"] ),
                                                    priority=1,
                                                    match=of.ofp_match(nw_proto = pkt.ipv4.UDP_PROTOCOL,
                                                                      dl_type = pkt.ethernet.IP_TYPE,
                                                                      tp_dst=portUDP,
                                                                      nw_src=i["ipsrc"],
                                                                      nw_dst=i["ipdst"]
                                                                      )))   
          else:
            log.warning("UDP packet not defined in dataModel!")  
            
      else:
        log.warning("RTSP is not enabled! UDP/RTP packets can't be exchanged")      
Example #5
        def flood(message=None):
            """
            create a packet_out with flood rule
            waiting _flood_delay sec before sending the instruction to the switch

            :message: optional log.debug message

            """

            msg = of.ofp_packet_out()  # create of_packet_out

            # flood
            if time.time() - self.connection.connect_time >= _flood_delay:
                if self.hold_down_expired is False:
                    self.hold_down_expired = True
                    log.info("%s: Flood hold_down expired -- flooding",
                            dpid_to_str(event.dpid))
                if message is not None: log.debug(message)
                log.debug("%i: flood %s -> %s" % (event.dpid, packet.src, packet.dst))
                action = of.ofp_action_output(port=of.OFPP_FLOOD)
                msg.actions.append(action)

            # wait
            else:
                log.info("Holding down flood for %s" % (dpid_to_str(event.dpid)))
                pass

            msg.data = event.ofp
            msg.in_port = event.port
            self.connection.send(msg)
Example #6
    def _handle_PacketIn (self, event):
        """
        Handle packet in messages from the switch to implement above algorithm.
        """
        
        packet = event.parsed
        
        buffer_id = event.ofp.buffer_id
        
        if buffer_id is not None:
            log.debug("Buffer id: %s" %(buffer_id))


        if self.datas.isAuthenticated(packet.src, dpid_to_str(event.dpid),event.port):
            #For periodic-authentication or Log-off
            if packet.type == 0x888e:
                log.debug("paquet EAP recu de %s" %(packet.src))
                self.notify_observers('nac',packet=packet, switch=dpid_to_str(event.dpid), port=event.port)
                
            #For DHCP message
            if isinstance(packet.next.next.next, dhcp):
                self.notify_observers('dhcp', packet=packet, port=event.port)
            else:
                log.debug("%s est authentifie" %(packet.src))
                self.L2._handle_PacketIn(event)
            
        # Handling of EAP packets
        elif packet.type == 0x888e:
            log.debug("paquet EAP recu de %s" %(packet.src))
            self.notify_observers('nac',packet=packet, switch=dpid_to_str(event.dpid), port=event.port)
            
        else:
            log.debug("autre")
            self.notify_observers('nac',packet=packet, switch=dpid_to_str(event.dpid), port=event.port)
Example #7
        def forward (message = None):
            this_dpid = dpid_to_str(event.dpid)

            if packet.dst.is_multicast:
                flood()
                return
            else:
                log.debug("Got unicast packet for %s at %s (input port %d):",
                          packet.dst, dpid_to_str(event.dpid), event.port)

                try:
                    """ Add your logic here"""
                    if self.portmap.get((this_dpid, packet.src, packet.dst, packet.find('tcp').srcport)):
                        new_dpid = self.portmap[(this_dpid, packet.src, packet.dst, packet.find('tcp').srcport)]
                    elif self.portmap.get((this_dpid, packet.src, packet.dst, packet.find('tcp').dstport)):
                        new_dpid = self.portmap[(this_dpid, packet.src, packet.dst, packet.find('tcp').dstport)]
                    elif self.portmap.get((this_dpid, packet.src, packet.dst, 0)):
                        new_dpid = self.portmap[(this_dpid, packet.src, packet.dst, 0)]
                    else:
                        raise AttributeError

                    install_fwdrule(event,packet,self.adjacency[this_dpid][new_dpid])

                except AttributeError:
                    log.debug("packet type has no transport ports, flooding")

                    # flood and install the flow table entry for the flood
                    install_fwdrule(event,packet,of.OFPP_FLOOD)
Example #8
def _install_path(prev_path, match):
	dst_sw = prev_path.dst
	cur_sw = prev_path.dst
	dst_pck = match.dl_dst
	
	msg = of.ofp_flow_mod()
	msg.match = match
	msg.idle_timeout = 10
	msg.flags = of.OFPFF_SEND_FLOW_REM	
	msg.actions.append(of.ofp_action_output(port = mac_learning[dst_pck].port))
	log.debug("Installing forward from switch %s to output port %s", util.dpid_to_str(cur_sw), mac_learning[dst_pck].port)
	switches[dst_sw].connection.send(msg)
	
	next_sw = cur_sw
	cur_sw = prev_path.prev[next_sw]
	while cur_sw is not None: #for switch in path.keys():
		msg = of.ofp_flow_mod()
		msg.match = match
		msg.idle_timeout = 10
		msg.flags = of.OFPFF_SEND_FLOW_REM
		log.debug("Installing forward from switch %s to switch %s output port %s", util.dpid_to_str(cur_sw), util.dpid_to_str(next_sw), adj[cur_sw][next_sw])
		msg.actions.append(of.ofp_action_output(port = adj[cur_sw][next_sw]))
		switches[cur_sw].connection.send(msg)
		next_sw = cur_sw
		
		cur_sw = prev_path.prev[next_sw]
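_install_path walks prev_path.prev, a predecessor map pointing from each switch back toward the source, typically produced by a shortest-path search over the same adj structure. Below is a hedged sketch of how such a map could be built with a plain BFS; the ShortestPath class and _bfs_prev helper are hypothetical stand-ins, not the project's own code.

# Hypothetical helper: build a predecessor map compatible with the
# prev_path.prev lookups used by _install_path above, via BFS over adj.
from collections import deque

class ShortestPath (object):
    def __init__ (self, src, dst, prev):
        self.src = src
        self.dst = dst
        self.prev = prev  # prev[switch] -> previous hop toward src (None at src)

def _bfs_prev (adj, src, dst):
    prev = {src: None}
    queue = deque([src])
    while queue:
        sw = queue.popleft()
        if sw == dst:
            return ShortestPath(src, dst, prev)
        for neighbor in adj.get(sw, {}):
            if neighbor not in prev:
                prev[neighbor] = sw
                queue.append(neighbor)
    return None  # no path between src and dst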
    def _handle_openflow_discovery_LinkEvent (self, event):
        # Normalise link direction
        link = event.link.uni

        s1 = link.dpid1
        s2 = link.dpid2
        s1 = dpid_to_str(s1)
        s2 = dpid_to_str(s2)
        p1 = link.port1
        p2 = link.port2

        assert s1 in self.switches
        assert s2 in self.switches

        if event.added and (s1, s2, p1, p2) not in self.links:
            self.links.add((s1, s2, p1, p2))
            self.send(add_link(s1, p1, s2, p2))

            # Do we have abandoned hosts?
            for h,s in self.hosts.iteritems():
                if s == s1: self.send(add_host_link(h, s1))
                elif s == s2: self.send(add_host_link(h, s2))

        elif event.removed and (s1, s2, p1, p2) in self.links:
            self.links.remove((s1, s2, p1, p2))
            self.send(delete_link(s1, s2))
Example #10
 def _setup_data_fwd_flows(self, switch_id, dst_mac):
     """
     Given a switch and dst_mac address, setup two flows for data forwarding
     on the switch and its peer switch if the two are not the same. If the
     same, setup only one flow.
     """
     (peer_switch_id, peer_fwd_port) = (self.inception.
                                        mac_to_dpid_port[dst_mac])
     peer_ip = self.inception.dpid_to_ip[peer_switch_id]
     # two switches are different, setup a first flow at switch
     if switch_id != peer_switch_id:
         fwd_port = self.inception.dpid_ip_to_port[(switch_id, peer_ip)]
         core.openflow.sendToDPID(switch_id, of.ofp_flow_mod(
             match=of.ofp_match(dl_dst=dst_mac),
             action=of.ofp_action_output(port=fwd_port),
             priority=priority.DATA_FWD))
         LOGGER.info("Setup forward flow on switch=%s for dst_mac=%s",
                     dpid_to_str(switch_id), dst_mac)
     # Setup flow at the peer switch
     core.openflow.sendToDPID(peer_switch_id, of.ofp_flow_mod(
         match=of.ofp_match(dl_dst=dst_mac),
         action=of.ofp_action_output(port=peer_fwd_port),
         priority=priority.DATA_FWD))
     LOGGER.info("Setup forward flow on switch=%s for dst_mac=%s",
                 dpid_to_str(peer_switch_id), dst_mac)
Example #11
    def _handle_LinkEvent(self, event):
        '''handling LinkEvent'''
        link = event.link
        if event.added:
            log.debug("Received LinkEvent, Link Added from %s to %s over port %d", util.dpid_to_str(link.dpid1), util.dpid_to_str(link.dpid2), link.port1)
            switch_ports[link.dpid1, link.port1] = link
            switch_ids[link.dpid1] = util.dpid_to_str(link.dpid1)
            switch_ids[link.dpid2] = util.dpid_to_str(link.dpid2)
            if not link.dpid1 in self.graph.keys():
                self.graph[link.dpid1] = {}
            self.graph[link.dpid1][link.dpid2] = 1
            self.graph[link.dpid1][link.dpid1] = 0

            if not link.dpid1 in self.wt_graph.keys():
                self.wt_graph[link.dpid1] = {}
            if not link.dpid2 in self.wt_graph.keys():
                self.wt_graph[link.dpid2] = {}
            self.wt_graph[link.dpid1][link.dpid2] = {'weight': 0, 'numFlows': 0, 'elephants': 0}
            self.wt_graph[link.dpid2][link.dpid1] = {'weight': 0, 'numFlows': 0, 'elephants': 0}

            if not link.dpid1 in self.port_info.keys():
                self.port_info[link.dpid1] = {}
            self.port_info[link.dpid1][link.dpid2] = link.port1

        else:
            log.debug("Received LinkEvent, Link Removed from %s to %s over port %d", util.dpid_to_str(link.dpid1), util.dpid_to_str(link.dpid2), link.port1)
Example #12
        def forward (message = None):
            this_dpid = dpid_to_str(event.dpid)

            if packet.dst.is_multicast:
                flood()
                return
            else:
                log.debug("Got unicast packet for %s at %s (input port %d):",
                          packet.dst, dpid_to_str(event.dpid), event.port)

                try:
                    k = (this_dpid, packet.src, packet.dst, packet.find("tcp").dstport)
                    if not self.portmap.get(k):
                        k = (this_dpid, packet.src, packet.dst, packet.find("tcp").srcport)
                        if not self.portmap.get(k):
                            raise AttributeError
                    ndpid = self.portmap[k]
                    log.debug("install: %s output %d" % (str(k), self.adjacency[this_dpid][ndpid]))
                    install_fwdrule(event, packet, self.adjacency[this_dpid][ndpid])

                except AttributeError:
                    log.debug("packet type has no transport ports, flooding")

                    # flood and install the flow table entry for the flood
                    install_fwdrule(event,packet,of.OFPP_FLOOD)
Example #13
    def SendStatsReq(self):
        # Send flow stat requests to every switch that has active flows.

        del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated.

        if core.forwarding.ActFlows:
            log.debug("Sending Port Stat Requests to all switches that have active flows")
            # Remove non-active flows in case they exist in the list
            core.forwarding.removeInactiveFlows()

            # log.debug("Before Sending Port Stats messages the ActFlow list is:")
            for fl in core.forwarding.ActFlows:
                # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
                for r in fl.used_path:
                    if r[0] not in self.PolSwitches:
                        self.PolSwitches.append(r[0])
        else:
            log.debug("No active flows at the moment, so not sending any port stat requests")

        self.pol_counter += 1  # increment polling session number

        if self.PolSwitches:
            for con in core.openflow.connections:
                if dpid_to_str(con.dpid) in self.PolSwitches:
                    log.debug("Sending Port Stats Request to %s: ", dpid_to_str(con.dpid))
                    msg = of.ofp_stats_request(body=of.ofp_port_stats_request())
                    con.send(msg)
Example #14
    def SendFlowStatsReq(self):
        # Send flow stat requests to every switch that has active flows.

        del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated.

        # Reset old link counters so that calculations are executed correctly when the responses arrive
        core.current_topology.resetLinkCounters()

        if core.forwarding.ActFlows:
            log.debug("Sending Flow Stat Requests to all switches that have active flows")
            # Here is where i should remove non-active flows
            core.forwarding.removeInactiveFlows()

            # log.debug("Before Sending Flow Stats messages the ActFlow list is:")
            for fl in core.forwarding.ActFlows:
                # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
                for r in fl.used_path:
                    if r[0] not in self.PolSwitches:
                        self.PolSwitches.append(r[0])
        else:
            log.debug("No active flows at the moment, so not sending any flow stat requests")


        self.pol_counter += 1  # increment polling session number

        if self.PolSwitches:
            for con in core.openflow.connections:
                if dpid_to_str(con.dpid) in self.PolSwitches:
                    log.debug("SendingFlowStatsRequest to %s: ", dpid_to_str(con.dpid))
                    msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
                    con.send(msg)
    def _handle_LinkEvent (self, event):
        l = event.link
        sw1 = dpid_to_str(l.dpid1)
        sw2 = dpid_to_str(l.dpid2)
        log.debug("link %s[%d] <-> %s[%d]", sw1, l.port1, sw2, l.port2)
        self.adjacency[sw1][sw2] = l.port1
        self.adjacency[sw2][sw1] = l.port2
Example #16
  def _handle_openflow_PacketOut(self, event):
    squelch = False

    dpid = event.connection.dpid
    inport = event.port
    packet = event.parsed
    if not packet.parsed:
      log.warning("%s: ignoring unparsed packet", dpid_to_str(dpid))
      return

    a = packet.find('arp')
    if not a: return
    if a.prototype == arp.PROTO_TYPE_IP:
      if a.hwtype == arp.HW_TYPE_ETHERNET:
        if a.protosrc != 0:
          if a.opcode == arp.REPLY:
            if _learn:
              if a.protosrc in _arp_table:
                if _arp_table[a.protosrc] != a.hwsrc:
                  log.warn("%s RE-learned %s: %s->%s", dpid_to_str(dpid),
                      a.protosrc, _arp_table[a.protosrc].mac, a.hwsrc)
              else:
                log.info("%s learned %s", dpid_to_str(dpid), a.protosrc)
              if a.hwsrc != EthAddr("11:11:11:11:11:11"):
                _arp_table[a.protosrc] = Entry(a.hwsrc)
              else:
                del _arp_table[a.protosrc]
              # print "PacketOut"
              # print _arp_table
              return
Example #17
	def __init__ (self, connection):
		self.connection = connection
		connection.addListeners(self)
		self.forwardTable = ForwardTable(self.connection.dpid)

		self.ipToPort = {}
		self.ipToMac = {}
		self.packetQueue = {}

		self.mac = EthAddr("00:12:34:56:78:9" + dpid_to_str(self.connection.dpid)[-1:])
		self.ip = IPAddr("10.0."+ str(int(dpid_to_str(self.connection.dpid)[-1:])-2) +".1")
		print "Router Details:",self.connection.dpid,self.mac,self.ip


		self.inf_ip = {}
		self.inf_port = {}
		self.inf_ip["r1-eth1"] = "192.0.1.1"
		self.inf_ip["r1-eth2"] = "192.0.4.1"
		self.inf_ip["r1-eth3"] = self.ip

		self.inf_port["r1-eth1"] = 0
		self.inf_port["r1-eth2"] = 1
		self.inf_port["r1-eth3"] = 2

		# LSU Fields
		self.counter = 0
		update_thread = threading.Thread(target=self.setupUpdateLoop)
		update_thread.daemon = True
		update_thread.start()
Example #18
    def _calc_link_weights(self):
        """Calculates link weights for all links in the network to be used by calc_path_tree_dijkstras().

        The cost assigned to each link is based on the link's current utilization (as determined by the FlowTracker
        module), and the exact manner in which utilization is converted to a link wieght is determined by
        groupflow_manager.link_weight_type. Valid options are LINK_WEIGHT_LINEAR and LINK_WEIGHT_EXPONENTIAL. Both options
        include a static weight which is always assigned to all links (determined by groupflow_manager.static_link_weight),
        and a dynamic weight which is based on the current utilization (determined by
        groupflow_manager.utilization_link_weight). Setting groupflow_manager.utilization_link_weight to 0 will always
        result in shortest hop routing.
        """
        curr_topo_graph = self.groupflow_manager.topology_graph
        self.node_list = list(self.groupflow_manager.node_set)
        
        weighted_topo_graph = []
        current_util = core.openflow_flow_tracker.get_max_flow_utilization(self.flow_cookie) / core.openflow_flow_tracker.link_max_bw
        log.info('Current utilization of flow ' + str(self.flow_cookie) + ': ' + str(current_util * core.openflow_flow_tracker.link_max_bw) + ' Mbps')
        
        for edge in curr_topo_graph:
            output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]
            raw_link_util = core.openflow_flow_tracker.get_link_utilization_normalized(edge[0], output_port)
            link_util_mcast_flow = core.openflow_flow_tracker.get_flow_utilization_normalized(edge[0], output_port, self.flow_cookie)
            
            link_util = max(0, (raw_link_util * (1 - link_util_mcast_flow)))
            
            # link_util = raw_link_util # Uncommenting this line will cause flows to reroute around their own traffic, good for testing
            
            # Current utilization here is doubled as a simple attempt to handle variability in flow rates
            if link_util + (current_util * 2) > 1:
                link_util = 1
            
            link_weight = 1
            
            if self.groupflow_manager.util_link_weight == 0:
                link_weight = self.groupflow_manager.static_link_weight
            else:
                if self.groupflow_manager.link_weight_type == LINK_WEIGHT_LINEAR:
                    if link_util >= 1:
                        link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()
                    else:
                        link_weight = min(self.groupflow_manager.static_link_weight + (self.groupflow_manager.util_link_weight * link_util),
                                sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())
                elif self.groupflow_manager.link_weight_type == LINK_WEIGHT_EXPONENTIAL:
                    if link_util >= 1:
                        link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()
                    else:
                        link_weight = min(self.groupflow_manager.static_link_weight + (self.groupflow_manager.util_link_weight * ((1 / (1 - link_util)) - 1)),
                                sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())
                
                log.debug('Router DPID: ' + dpid_to_str(edge[0]) + ' Port: ' + str(output_port) + 
                        ' TotalUtil: ' + str(raw_link_util) + ' FlowUtil: ' + str(link_util_mcast_flow) + ' OtherFlowUtil: ' + str(link_util) 
                        + ' Weight: ' + str(link_weight))

            weighted_topo_graph.append([edge[0], edge[1], link_weight])
        self.weighted_topo_graph = weighted_topo_graph
        
        log.debug('Calculated link weights for source at router_dpid: ' + dpid_to_str(self.src_router_dpid))
        for edge in self.weighted_topo_graph:
            log.debug(dpid_to_str(edge[0]) + ' -> ' + dpid_to_str(edge[1]) + ' W: ' + str(edge[2]))
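The docstring above describes two ways of turning utilization into a weight. Here is that arithmetic pulled out on its own as a hedged sketch; static_weight, util_weight and max_weight stand in for the groupflow_manager and flow-tracker values, and the constants are illustrative placeholders rather than the module's real definitions.

# Illustrative sketch of the linear and exponential weighting schemes
# described in the docstring of _calc_link_weights above.
import sys

LINK_WEIGHT_LINEAR = 1       # placeholder constants for illustration
LINK_WEIGHT_EXPONENTIAL = 2

def link_weight (link_util, static_weight, util_weight,
                 weight_type=LINK_WEIGHT_LINEAR, max_weight=sys.float_info.max):
    """Return a link weight for a utilization value in [0, 1]."""
    if util_weight == 0:
        return static_weight              # pure shortest-hop routing
    if link_util >= 1:
        return max_weight                 # saturated link: effectively unusable
    if weight_type == LINK_WEIGHT_LINEAR:
        dynamic = util_weight * link_util
    else:                                 # LINK_WEIGHT_EXPONENTIAL
        dynamic = util_weight * ((1.0 / (1.0 - link_util)) - 1.0)
    return min(static_weight + dynamic, max_weight)

# e.g. link_weight(0.5, 1, 10) == 6.0 (linear)
#      link_weight(0.5, 1, 10, LINK_WEIGHT_EXPONENTIAL) == 11.0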
Example #19
	def __repr__(self):
		ret = util.dpid_to_str(self.dst)
		u = self.prev[self.dst]
		while(u != None):
			ret = util.dpid_to_str(u) + "->" + ret
			u = self.prev[u]
		
		return ret			
Example #20
	def _handle_LinkEvent(self,event):
		global Switch_set
		global Link_set
		dpid1 = dpid_to_str(event.link.dpid1)  # dpid of OpenFlow switch 1
		dpid2 = dpid_to_str(event.link.dpid2)  # dpid of OpenFlow switch 2
		port1 = event.link.port1  # port through which switch 1 attaches to this link
		port2 = event.link.port2  # port through which switch 2 attaches to this link
		if event.added == True:
			# Update Switch_set
			if dpid1 not in Switch_set:
				Switch_set[dpid1] = {}
			Switch_set[dpid1].update({port1:(dpid2,port2)})

			if dpid2 not in Switch_set:
				Switch_set[dpid2] = {}
			Switch_set[dpid2].update({port2:(dpid1,port1)})

			# Update Link_set
			if (dpid1, dpid2) not in Link_set and (dpid2, dpid1) not in Link_set:
				Link_set.append((dpid1, dpid2)) 

		elif event.removed == True:
			# Update Switch_set
			if dpid1 not in Switch_set:
				pass

			elif not Switch_set[dpid1]:
				del Switch_set[dpid1]

			elif port1 in Switch_set[dpid1]:
				del Switch_set[dpid1][port1]

			else:
				pass

			if dpid2 not in Switch_set:
				pass

			elif not Switch_set[dpid2]:
				del Switch_set[dpid2]

			elif port2 in Switch_set[dpid2]:
				del Switch_set[dpid2][port2]

			else:
				pass

			# Update Link_set
			if (dpid1, dpid2) in Link_set or (dpid2, dpid1) in Link_set:
				if (dpid1, dpid2) in Link_set:
					Link_set.remove((dpid1,dpid2))

				elif (dpid2, dpid1) in Link_set:
					Link_set.remove((dpid2,dpid1)) 

		else:
			pass
Example #21
  def _handle_PacketIn (self, event):
    dpid = event.connection.dpid
    inport = event.port
    packet = event.parsed

    a = packet.find('arp')
    if not a: return

    if a.prototype != arp.PROTO_TYPE_IP:
      return

    if a.hwtype != arp.HW_TYPE_ETHERNET:
      return

    if a.opcode == arp.REQUEST:
      log.debug("%s ARP request %s => %s", dpid_to_str(dpid),
                a.protosrc, a.protodst)

      if self.use_port_mac:
        src_mac = event.connection.ports[inport].hw_addr
      else:
        src_mac = event.connection.eth_addr
      ev = ARPRequest(event.connection, a, src_mac,
                      self.eat_packets, inport)
      self.raiseEvent(ev)
      if ev.reply is not None:
        r = arp()
        r.hwtype = a.hwtype
        r.prototype = a.prototype
        r.hwlen = a.hwlen
        r.protolen = a.protolen
        r.opcode = arp.REPLY
        r.hwdst = a.hwsrc
        r.protodst = a.protosrc
        r.protosrc = a.protodst
        r.hwsrc = EthAddr(ev.reply)
        e = ethernet(type=packet.type, src=ev.reply_from, dst=a.hwsrc)
        e.payload = r
        log.debug("%s answering ARP for %s" % (dpid_to_str(dpid),
            str(r.protosrc)))
        msg = of.ofp_packet_out()
        msg.data = e.pack()
        msg.actions.append(of.ofp_action_output(port =
                                                of.OFPP_IN_PORT))
        msg.in_port = inport
        event.connection.send(msg)
        return EventHalt if ev.eat_packet else None

    elif a.opcode == arp.REPLY:
      log.debug("%s ARP reply %s => %s", dpid_to_str(dpid),
                a.protosrc, a.hwsrc)

      ev = ARPReply(event.connection,a,self.eat_packets,inport)
      self.raiseEvent(ev)
      return EventHalt if ev.eat_packet else None

    return EventHalt if self.eat_packets else None
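The handler above only answers an ARP request when some listener fills in ev.reply. Below is a hedged sketch of such a listener, assuming the class above is registered on core under the name ARPHelper; that registration name and the fixed MAC are assumptions for illustration, not details from the snippet.

# Hedged sketch: a component that answers the ARPRequest events raised above
# by setting event.reply, which the handler then packs into an ARP reply.
from pox.core import core
from pox.lib.addresses import EthAddr

def _handle_ARPRequest (event):
    # Answer every request with a fixed gateway MAC (purely illustrative).
    event.reply = EthAddr("00:11:22:33:44:55")

def launch ():
    # Assumes the event source above is registered on core as ARPHelper.
    core.ARPHelper.addListenerByName("ARPRequest", _handle_ARPRequest)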
Example #22
 def _handle_openflow_discovery_LinkEvent (self, event):
   (dp1,p1),(dp2,p2) = event.link.end
   self.adj[dp1][dp2] = 1
   self.ports[dp1][dp2] = p1
   log.warning(
     "Link %s -> %s is discovered.", 
     dpid_to_str(dp1), 
     dpid_to_str(dp2) 
   )
        def forward (message = None):
            this_dpid = dpid_to_str(event.dpid)

            if packet.dst.is_multicast:
                flood()
                return
            else:
                log.debug("Got unicast packet for %s at %s (input port %d):",
                          packet.dst, dpid_to_str(event.dpid), event.port)
Example #24
 def _handle_ConnectionUp (self, event):
     log.debug("Switch %s has come up.", dpid_to_str(event.dpid))
     ports[event.dpid] = {}
     for i in event.ofp.ports:
         ports[event.dpid][i.port_no] = i.curr
     # MANUAL ADJUSTMENT ------> REMOVE
     if dpid_to_str(event.dpid) == '00-00-00-00-00-02':
         ports[event.dpid][2] = 130
     if dpid_to_str(event.dpid) == '00-00-00-00-00-03':
         ports[event.dpid][3] = 130
Example #25
def _print_rev_path(dst_pck, src, dst, prev_path):
	str = "Reverse path from %s to %s over: [%s->dst over port %s]" % (util.dpid_to_str(src), util.dpid_to_str(dst), util.dpid_to_str(dst), mac_learning[dst_pck].port)
	next_sw = dst
	cur_sw = prev_path[next_sw]
	while cur_sw != None: #for switch in path.keys():
		str += "[%s->%s over port %s]" % (util.dpid_to_str(cur_sw), util.dpid_to_str(next_sw), adj[cur_sw][next_sw])
		next_sw = cur_sw
		cur_sw = prev_path[next_sw]
		
	log.debug(str)
Example #26
 def _handle_PortStatus(self,event):
     if event.added or (event.modified and event.ofp.desc.config == 0x00000000 and event.ofp.desc.state == 0x00000000):
         log.debug("[%s] Port %i is up!",dpid_to_str(event.dpid),event.port)
         lldp_buffer = list()
         lldp  = self._create_lldp_packet(event.dpid,event.port,event.ofp.desc.hw_addr)
         lldp_buffer.append(LLDPUtil.SendItem(event.dpid,event.port,lldp))
         self._set_send_lldp_timer(lldp_buffer)
     elif event.deleted or (event.modified and (event.ofp.desc.config == 0x00000001 or event.ofp.desc.state == 0x00000001)):
         log.debug("[%s] Port %i is down!",dpid_to_str(event.dpid),event.port)
         self._delete_link(event.dpid,event.port)
Example #27
 def _handle_LinkUtilizationEvent(self, event):
     """Processes LinkUtilizationEvents (generated by the FlowTracker module), and replaces flows that traverse the specified link"""
     
     if event.link_utilization >= core.openflow_flow_tracker.link_max_bw:
         log.debug('Link Fully Utilized! Switch:' + dpid_to_str(event.router_dpid) + ' Port:' + str(event.output_port))
     
     # Ignore the event if congestion threshold based flow replacement is not enabled
     if self.flow_replacement_mode != CONG_THRESHOLD_FLOW_REPLACEMENT:
         return
         
     log.debug('Got LinkUtilEvent - Switch: ' + dpid_to_str(event.router_dpid) + ' Port: ' + str(event.output_port) + '\n\tUtil: ' + str(event.link_utilization))
         
     replacement_time = time.time()
     
     # 1) Determine the amount of utilization that should be replaced to bring the link back under the congestion threshold
     replacement_utilization = event.link_utilization - event.cong_threshold
     if replacement_utilization < 0:
         log.warn('LinkUtilizationEvent specified negative replacement utilization.')
         return
     log.debug('Attempting replacement of ' + str(replacement_utilization) + ' Mbps of flows')
     
     # 2) Build a list of the flows managed by this module that are contributing to congestion, sorted by decreasing utilization
     replacement_flows = []
     for event_flow_cookie in event.flow_map:
         if event_flow_cookie in self.multicast_paths_by_flow_cookie:
             replacement_flows.append((event_flow_cookie, event.flow_map[event_flow_cookie]))
     replacement_flows.sort(key = lambda flow: flow[1])
     log.debug('Candidates for flow replacement: ' + str(replacement_flows))
     
     # 3) Replace flows until all candidates have been processed, or the targeted replacement utilization is reached
     # Note that flows which have been recently replaced will not be replaced again
     replaced_utilization = 0
     for flow in replacement_flows:
         log.debug('FlowCookie: ' + str(flow[0]) + ' CurrentTime: ' + str(replacement_time) + ' LastReplacementTime: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))
         if self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is not None:
             log.debug('Replacement Interval: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))
             
         if (self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is None) or (
                 replacement_time - self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time >= self.flow_replacement_interval):
             log.debug('Replacing multicast flow with cookie: ' + str(flow[0]) + ' Bitrate: ' + str(flow[1]) + ' Mbps')
             self.multicast_paths_by_flow_cookie[flow[0]].update_flow_placement()
         
             replaced_utilization += flow[1]
             # Note: This causes the replacement to stop after replacing a single flow (may help prevent thrashing)
             # Uncomment this to have the module replace flows until the current link utilization minus the replacement bandwidth 
             # is less than the link's congestion threshold.
             break
         
         # Note: Flows which are not actually replaced are counted toward the replacement utilization here, as it is assumed that these flows
         # are already in the process of being replaced (this assumption should hold valid as long as the flow replacement interval is not
         # greater than 3 sampling intervals of the flow tracker)
         if replaced_utilization >= replacement_utilization:
             break
     
     log.debug('Replaced ' + str(replaced_utilization) + ' Mbps of flows')
Example #28
        def forward (message = None):
            this_dpid = dpid_to_str(event.dpid)

            if packet.dst.is_multicast:
                flood()
                return

            else:
                log.debug("Got unicast packet for %s at %s (input port %d):",
                          packet.dst, dpid_to_str(event.dpid), event.port)

                if tcpp:
                    log.debug("Received tcp packet on %s" % this_dpid)
                    log.debug("Packet L2 src : %s" % packet.src)
                    log.debug("Packet L2 dst : %s" % packet.dst)
                    log.debug("Packet dst port : %s" % tcpp.dstport)

                    l2_src = EthAddr(packet.src)
                    l2_dst = EthAddr(packet.dst)
                    l4_src = int(tcpp.srcport)
                    l4_dst = int(tcpp.dstport)

                    if l4_dst != 80:
                        l4_dst = '*'
                   
                    # our table only bases upon the destination, but we want to guarantee
                    # the reverse direction traffic also goes in the same way. 
                    if l4_src == 80:
                        l4_dst = 80 
                    
                    # if we are on s2 or s3, simply forward to the output port
                    if this_dpid == "00-00-00-00-00-02" or this_dpid == "00-00-00-00-00-03":
                        if event.port == 1:
                            output_port = 2
                        else:
                            output_port = 1
                        install_fwdrule(event,packet,int(output_port))

                    # if we are on s1 or s4
                    if this_dpid == "00-00-00-00-00-01" or this_dpid == "00-00-00-00-00-04":
                        nexthop = self.portmap[(this_dpid,l2_dst,l4_dst)]
                        
                        if nexthop.split()[0] == "Port":
                            print "**********************************GOT A HIT"
                            output_port = nexthop.split()[1]
                            print "output port is %s" % output_port 
                        else:
                            output_port = self.adjacency[this_dpid][nexthop] 
                        
                        install_fwdrule(event,packet,int(output_port))
 
                else:
                    log.debug("packet type has no transport ports, flooding")
                    install_fwdrule(event,packet,of.OFPP_FLOOD)
Example #29
def handle_port_stats(event):
    stats = event.stats
    for stat in stats:
      if dev_port_ip.get(dpid_to_str(event.dpid)):# if entry exists
        if dev_port_ip.get(dpid_to_str(event.dpid)).get(stat.port_no):# if entry exists
          add_port_entry(dev_port_ip[dpid_to_str(event.dpid)][stat.port_no], stat.tx_bytes,
                         stat.rx_bytes, stat.tx_packets, stat.rx_packets)
    if layer1_correct:
        if checker:
            violation = checker.check_if_ports_legal(ip_bytes_sent, 
                                            ip_bytes_recv, ip_packets_sent, ip_packets_recv)
            decider(violation, None)
Example #30
  def _set_path_on_swtiches (self, pid, path):
    for k, sw in enumerate(path):
      msg = of.ofp_flow_mod()

      if USE_VLAN_TAG:
        msg.match.dl_vlan = pid
      if USE_ETHERNET_SRC_TAG:
        msg.match.dl_src = EthAddr( self._int_to_MAC( pid ) ) # match ethernet addr

      if k < 1:
        # First one, host
        continue
      if k > len(path)-2: 
        # Last one, host
        continue

      if k == len(path) - 2:
        # sw -> host
        
        if USE_VLAN_TAG:
          # strip vlan tag then send to host
          msg.actions.append(
            of.ofp_action_strip_vlan()
          )
        if USE_ETHERNET_SRC_TAG:
          # add back the real src
          msg.actions.append(
            of.ofp_action_dl_addr.set_src( EthAddr(path[0]) ) 
          )

        msg.actions.append(
          of.ofp_action_output( port = self.hadj[path[k+1]][sw] )
        )
        core.openflow.sendToDPID(sw, msg)
        log.warning(
          "Set rule: %s -> %s via port %i",
          dpid_to_str(sw),
          path[k+1],
          self.hadj[path[k+1]][sw]
        )
      else:
        # sw -> sw
        msg.actions.append(
          of.ofp_action_output( port = self.ports[sw][path[k+1]] )
        )
        core.openflow.sendToDPID(sw, msg)
        log.warning(
          "Set rule: %s -> %s via port %i",
          dpid_to_str(sw),
          dpid_to_str(path[k+1]),
          self.ports[sw][path[k+1]]
        )
Example #31
    def flood (message = None):
      """ Floods the packet """
      msg = of.ofp_packet_out()
      if time.time() - self.connection.connect_time >= _flood_delay:
        # Only flood if we've been connected for a little while...

        if self.hold_down_expired is False:
          # Oh yes it is!
          self.hold_down_expired = True
          log.info("%s: Flood hold-down expired -- flooding",
              dpid_to_str(event.dpid))

        if message is not None: log.debug(message)
        #log.debug("%i: flood %s -> %s", event.dpid,packet.src,packet.dst)
        # OFPP_FLOOD is optional; on some switches you may need to change
        # this to OFPP_ALL.
        msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
      else:
        pass
        #log.info("Holding down flood for %s", dpid_to_str(event.dpid))
      msg.data = packet
      msg.in_port = event.port
      self.connection.send(msg)
Example #32
    def get_link_utilization_mbps(self, switch_dpid, output_port):
        """Returns the current estimated utilization (in Mbps) on the specified switch and output port.

        If a utilization is available based on port stats from the receive side of the specified link, this value will
        be returned (as port stats are more reliable in Mininet than flow stats). If port stats are not available (which
        would occur when the opposite side of the link is not being tracked) then a utilization estimate derived from
        flow stats will be returned.
        """
        # First, get the switch on the other side of this link
        receive_switch_dpid = None
        receive_port = None
        for link in core.openflow_discovery.adjacency:
            if link.dpid1 == switch_dpid and link.port1 == output_port:
                receive_switch_dpid = link.dpid2
                receive_port = link.port2
                break

        if receive_switch_dpid is None:
            # Reception statistics unavailable, use the transmission statistics if available
            log.warn("PortStats unavailable for Switch: " +
                     dpid_to_str(switch_dpid) + ' Port: ' + str(output_port))
            if switch_dpid in self.switches:
                if output_port in self.switches[
                        switch_dpid].flow_total_average_bandwidth_Mbps:
                    return self.switches[
                        switch_dpid].flow_total_average_bandwidth_Mbps[
                            output_port]
            return 0  # TODO: May want to throw exception here

        if receive_switch_dpid in self.switches:
            if receive_port in self.switches[
                    receive_switch_dpid].port_average_bandwidth_Mbps:
                return self.switches[
                    receive_switch_dpid].port_average_bandwidth_Mbps[
                        receive_port]

        return 0  # TODO: May want to throw exception here
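The per-port averages consulted above come from periodic port-stats samples. Below is a minimal, illustrative sketch of the byte-count-to-Mbps conversion a tracker like this could apply between two samples; the function name and sample values are not from the FlowTracker module itself.

# Illustrative only: convert a byte-count delta between two successive
# port-stats samples into an average bandwidth in Mbps.
def bytes_to_mbps (prev_byte_count, curr_byte_count, interval_seconds):
    delta_bytes = curr_byte_count - prev_byte_count
    return (delta_bytes * 8.0) / (interval_seconds * 1000000.0)

# e.g. samples taken 10 seconds apart that differ by 12,500,000 bytes:
# bytes_to_mbps(0, 12500000, 10) == 10.0 Mbps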
Example #33
  def install_flow (self, con_or_dpid, priority = None):
    if priority is None:
      priority = self._flow_priority
    if isinstance(con_or_dpid, (int,long)):
      con = core.openflow.connections.get(con_or_dpid)
      if con is None:
        log.warn("Can't install flow for %s", dpid_to_str(con_or_dpid))
        return False
    else:
      con = con_or_dpid 

    #portPerSwitch stores all ports in one switch
    portItem = namedtuple("portItem",('port_num','port_addr'))
    portPerSwitch = []
    # Store all ports of a switch
    for p in _portInfo:
      if p.dpid == con.dpid:
        portPerSwitch.append(portItem(p.port_num,p.port_addr))
    #print'S',con.dpid,'ports',portPerSwitch

    for pByID in _portInfo:
      if pByID.port_num != 65534 and pByID.dpid == con.dpid:
        match = of.ofp_match(dl_type = pkt.ethernet.LLDP_TYPE,
                          dl_dst = pkt.ETHERNET.NDP_MULTICAST,in_port = pByID.port_num)
        #print'Ingress Port:',pByID.port_num
        msg = of.ofp_flow_mod()
        msg.priority = priority
        msg.match = match
        msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
        for p in portPerSwitch:
          if p.port_num != 65534 and p.port_num != pByID.port_num:
            #print'Install flow====Rewrites',p.port_addr,', send Out from',p.port_num
            msg.actions.append(of.ofp_action_dl_addr.set_src(p.port_addr))
            msg.actions.append(of.ofp_action_output(port = p.port_num))
        con.send(msg)
        #print'Installing flows to switch', con.dpid
    return True
    def _handle_ConnectionUp(self, event):
        log.debug("Connection %s" % (event.connection, ))
        dpid_in = dpid_to_str(event.dpid)
        split_str = dpid_in.split()
        #Altering the DPID in desired format
        dpid_in = split_str[0]
        log.info("DPID %s", dpid_in)
        dpid_1 = "00-00-00-00-00-01"
        dpid_2 = "00-00-00-00-00-02"
        dpid_3 = "00-00-00-00-00-03"
        dpid_4 = "00-00-00-00-00-04"
        dpid_5 = "00-00-00-00-00-05"
        dpid_6 = "00-00-00-00-00-06"
        dpid_7 = "00-00-00-00-00-07"
        dpid_8 = "00-00-00-00-00-08"
        dpid_9 = "00-00-00-00-00-09"
        dpid_10 = "00-00-00-00-00-0a"
        dpid_11 = "00-00-00-00-00-0b"

        if dpid_in == dpid_6:
            #Running Firewall-1 Module
            Fw1(event.connection,
                self.transparent)  #Fw1 is defined in Firewall.py file
            log.info("firewall 1 initiated!")
        elif dpid_in == dpid_7:
            #Running Firewall-2 Module
            Fw2(event.connection,
                self.transparent)  #Fw2 is defined in Firewall.py file
            log.info("firewall 2 initiated!")
        elif dpid_in == dpid_1 or dpid_in == dpid_2 or dpid_in == dpid_3 or dpid_in == dpid_4 or dpid_in == dpid_5:
            #Running Learning Switch Module
            LearningSwitch(event.connection, self.transparent
                           )  #LearningSwitch is defined in l2_learning.py file
            log.info("Learning Switch initiated!")
        else:
            # CLICK will handle this switch
            log.info("CLICK handles this switch!")
Example #35
    def _handle_LinkEvent(self, event):
        """
        This function is called every time openflow_discovery discovers a new link
        """

        link = event.link
        log.info("Link has been discovered from %s,%s to %s,%s", dpid_to_str(link.dpid1), link.port1, dpid_to_str(link.dpid2), link.port2)

        switch1 = self.fat_tree.get_switch_por_dpid(link.dpid1)
        switch2 = self.fat_tree.get_switch_por_dpid(link.dpid2)
        link = Link(switch1, switch2, event.link)

        for switch_controller in self.switches:
            switch_controller.set_levels(len(self.fat_tree.niveles))
            if switch_controller.dpid == switch1.dpid and event.added:
                switch_controller.add_link(link)

        if event.added:
            self.fat_tree.agregar_link(link)
            print("Link agregado", link)
        else:
            self.fat_tree.quitar_link(link)
            self._wipe_switches_tables()
            print("Link quitado", link)
Example #36
    def _handle_PacketIn_normal(self, event):
        """
        Handle packet in messages from the switch to implement the above algorithm.
        """

        packet = event.parsed
        self.macToPort[packet.src] = event.port  # 1

        def flood():
            """
            Floods the packet to all other ports.
            """
            msg = of.ofp_packet_out()
            msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
            msg.data = event.ofp
            msg.in_port = event.port
            self.connection.send(msg)

        if packet.dst not in self.macToPort:  # 2
            flood()  # 2a
        else:
            out_port = self.macToPort[packet.dst]
            if out_port == event.port:  # 3
                log.warning("[L2] Same port for packet from %s -> %s on %s.%s."
                            "Dropping..." %
                            (packet.src, packet.dst, dpid_to_str(
                                event.dpid), out_port))
                return  # 3a
            # 4
            log.debug("[L2] Installing flow for %s.%i -> %s.%i" %
                      (packet.src, event.port, packet.dst, out_port))
            msg = of.ofp_flow_mod()
            msg.match = of.ofp_match.from_packet(packet, event.port)
            msg.actions.append(of.ofp_action_output(port=out_port))
            msg.data = event.ofp  # 6a
            self.connection.send(msg)
Example #37
 def start_switch(event):
     log.debug("Controlling %s" % (event.connection, ))
     global s1_dpid, s2_dpid
     print("ConnectionUp: ", dpid_to_str(event.connection.dpid))
     #remember the connection dpid for switch
     for m in event.connection.features.ports:
         if m.name == "s1-eth1":
             s1_dpid = event.connection.dpid
             print("s1_dpid=", s1_dpid)
             print("Assigning router functionality to S1 with dpid",
                   s1_dpid)
             Router(event.connection, s1_dpid)
         elif m.name == "s2-eth1":
             s2_dpid = event.connection.dpid
             print("s2_dpid=", s2_dpid)
             print("Assigning switch functionality to S2 with dpid",
                   s2_dpid)
             Switch(event.connection, s2_dpid)
         elif m.name == "s3-eth1":
             s3_dpid = event.connection.dpid
             print("s3_dpid=", s3_dpid)
             print("Assigning switch functionality to S2 with dpid",
                   s3_dpid)
             Switch(event.connection, s3_dpid)
Example #38
    def set_tracked_ports(self, tracked_ports):
        """Sets the port numbers on which bandwidth utilization should be tracked for this switch.
        
        * tracked_ports: List of integer port numbers on which utilization should be tracked for this switch
        """
        self.tracked_ports = tracked_ports
        log.debug('Switch ' + dpid_to_str(self.dpid) + ' set tracked ports: ' +
                  str(tracked_ports))
        # Delete any stored state on ports which are no longer tracked
        keys_to_del = []
        for port_no in self.flow_interval_byte_count:
            if not port_no in self.tracked_ports:
                keys_to_del.append(port_no)

        for key in keys_to_del:
            del self.flow_total_byte_count[key]
            del self.flow_interval_byte_count[key]
            del self.flow_interval_bandwidth_Mbps[key]
            del self.flow_average_bandwidth_Mbps[key]

            del self.port_total_byte_count[key]
            del self.port_interval_byte_count[key]
            del self.port_interval_bandwidth_Mbps[key]
            del self.port_average_bandwidth_Mbps[key]
Example #39
    def _handle_HostEvent(self, event):
        global Hosts
        mac = str(event.entry.macaddr)
        to_switch = dpid_to_str(event.entry.dpid)
        port = event.entry.port
        if event.join == True:
            if mac not in Hosts:
                Hosts[mac] = (to_switch, port)

            elif (to_switch, port) not in Hosts[mac]:
                Hosts[mac] = (to_switch, port)

            else:
                pass
            log.info("host %s has come up.", mac)

        elif event.leave == True:
            if mac not in Hosts:
                pass

            else:
                del Hosts[mac]
        else:
            pass
Example #40
    def __init__(self, t, r, service_ip, servers=[]):
        self.switches = {}  # Switches seen: [dpid] -> Switch
        self.t = t  # Master Topo object, passed in and never modified.
        self.r = r  # Master Routing object, passed in and reused.
        self.macTable = {}  # [mac] -> (dpid, port)
        self.macTable2 = {}
        self.paths = {}
        self.flows = {}
        self.link_usage = {}
        self.last_server = 0

        self.service_ip = IPAddr(service_ip)
        self.servers = [IPAddr(a) for a in servers]
        self.live_servers = {}  # IP -> MAC,port
        self.selected_server = None
        try:
            self.log = log.getChild(dpid_to_str(self.con.dpid))
        except:
            # Be nice to Python 2.6 (ugh)
            self.log = log

        self.total_connection = {}  # IP -> total connection
        for ip in servers:
            self.total_connection[ip] = 0
        self.memory = {}  # (srcip,dstip,srcport,dstport) -> MemoryEntry

        self.outstanding_probes = {}  # IP -> expire_time
        # How quickly do we probe?
        self.probe_cycle_time = 5

        # How long do we wait for an ARP reply before we consider a server dead?
        self.arp_timeout = 3

        # TODO: generalize all_switches_up to a more general state machine.
        self.all_switches_up = False  # Sequences event handling.
        core.openflow.addListeners(self, priority=0)
Example #41
    def _handle_PacketIn(self, event):
        """
        Handles packet in messages from the switch.
        """

        packet = event.parsed  # This is the parsed packet data.
        if not packet.parsed:
            log.warning("Ignoring incomplete packet")
            return

        a = packet.find('arp')
        if not a:
            return

        if a.opcode == arp.REQUEST:
            r = arp()
            r.hwtype = a.hwtype
            r.prototype = a.prototype
            r.hwlen = a.hwlen
            r.protolen = a.protolen
            r.opcode = arp.REPLY
            r.hwdst = a.hwsrc
            r.protodst = a.protosrc
            r.protosrc = a.protodst
            r.hwsrc = arp_table[a.protodst]
            e = ethernet(type=packet.type,
                         src=event.connection.eth_addr,
                         dst=a.hwsrc)
            e.payload = r
            log.info("%s answering ARP for %s" %
                     (dpid_to_str(event.connection.dpid), str(r.protosrc)))
            msg = of.ofp_packet_out()
            msg.data = e.pack()
            msg.actions.append(of.ofp_action_output(port=of.OFPP_IN_PORT))
            msg.in_port = event.port
            event.connection.send(msg)
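The reply above fills r.hwsrc from arp_table[a.protodst], a preconfigured IP-to-MAC mapping. Here is a hedged sketch of how such a table might be declared; the addresses are placeholders, not values from the original project.

# Hypothetical static ARP table of the shape the handler above expects:
# IPAddr keys mapped to the EthAddr returned in the ARP reply.
from pox.lib.addresses import IPAddr, EthAddr

arp_table = {
    IPAddr("10.0.0.1"): EthAddr("00:00:00:00:00:01"),
    IPAddr("10.0.0.2"): EthAddr("00:00:00:00:00:02"),
    IPAddr("10.0.0.3"): EthAddr("00:00:00:00:00:03"),
}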
Example #42
    def _handle_LinkEvent(self, event):
        '''handling LinkEvent'''
        link = event.link
        if event.added:
            log.debug(
                "Received LinkEvent, Link Added from %s to %s over port %d",
                util.dpid_to_str(link.dpid1), util.dpid_to_str(link.dpid2),
                link.port1)
            switch_ports[link.dpid1, link.port1] = link
            switch_ids[link.dpid1] = util.dpid_to_str(link.dpid1)
            switch_ids[link.dpid2] = util.dpid_to_str(link.dpid2)
            if not link.dpid1 in self.graph.keys():
                self.graph[link.dpid1] = {}
            self.graph[link.dpid1][link.dpid2] = 1
            self.graph[link.dpid1][link.dpid1] = 0

            if not link.dpid1 in self.wt_graph.keys():
                self.wt_graph[link.dpid1] = {}
            if not link.dpid2 in self.wt_graph.keys():
                self.wt_graph[link.dpid2] = {}
            self.wt_graph[link.dpid1][link.dpid2] = {
                'weight': 0,
                'numFlows': 0,
                'elephants': 0
            }
            self.wt_graph[link.dpid2][link.dpid1] = {
                'weight': 0,
                'numFlows': 0,
                'elephants': 0
            }

            if not link.dpid1 in self.port_info.keys():
                self.port_info[link.dpid1] = {}
            self.port_info[link.dpid1][link.dpid2] = link.port1

        else:
            log.debug(
                "Received LinkEvent, Link Removed from %s to %s over port %d",
                util.dpid_to_str(link.dpid1), util.dpid_to_str(link.dpid2),
                link.port1)
Example #43
 def _handle_openflow_ConnectionUp(self, event):
     if self._install_flow:
         # Make sure we get appropriate traffic
         log.debug("Installing flow for %s", dpid_to_str(event.dpid))
         self.install_flow(event.connection)
Example #44
    def _handle_PacketIn(self, event):
        """
    Handle packet in messages from the switch.
    """

        packet = event.parsed

        # Fetching the updated version of the list.
        file = open('siteAhosts', 'r')
        siteAhosts = eval(file.readline())
        file.close()

        # Fetching the updated version of allowed connections.
        file = open('allowedConnections', 'r')
        allowedConnections = eval(file.readline())
        file.close()

        def flood(message=None):
            """
      Floods packet.
      """
            msg = of.ofp_packet_out()
            if time.time() - self.connection.connect_time >= _flood_delay:
                if self.hold_down_expired is False:
                    self.hold_down_expired = True
                    log.info("%s: Flood hold-down expired -- flooding",
                             dpid_to_str(event.dpid))
                if message is not None: log.debug(message)
                msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
            else:
                pass
            msg.data = event.ofp
            msg.in_port = event.port
            self.connection.send(msg)

        def drop(duration=None):
            """
      Drops packet.
      """

            if duration is not None:
                if not isinstance(duration, tuple):
                    duration = (duration, duration)
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet)
                msg.idle_timeout = duration[0]
                msg.hard_timeout = duration[1]
                msg.buffer_id = event.ofp.buffer_id
                self.connection.send(msg)
            elif event.ofp.buffer_id is not None:
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                msg.in_port = event.port
                self.connection.send(msg)

        self.macToPort[packet.src] = event.port

        def isAllowedConnection(source, destination):
            """
      Checks if incoming packets IP source and destination is one of the allowed connections.
      """
            if len(allowedConnections[0]) == 0:  #No allowed connections
                return False
            for element in allowedConnections:
                if (element[0] == source and element[1] == destination) or (
                        element[0] == destination and element[1]
                        == source):  #Traffic both ways are allowed
                    log.info("Connection listed as allowed: " + str(source) +
                             " <-> " + str(destination))
                    return True
            return False

        if (isinstance(packet.next, ipv4)):  #ARP packets are allowed past.
            if (packet.payload.protocol == 1) and (packet.payload.payload.type
                                                   == 0):
                #log.debug("ICMP packet ECHO REPLY")
                pass  #ECHO REPLY messages are allowed past.
            else:
                if (str(packet.src) not in siteAhosts
                    ):  #Packet comes from outside the network.
                    #How open the network should be can be decided and filtered here. In this scenario, only the resource r1 has restrictions.
                    if ((str(packet.next.dstip) == r1)
                            and (isAllowedConnection(str(packet.next.srcip),
                                                     str(packet.next.dstip))
                                 == False)):  #Unless the connection is allowed
                        drop()
                        return
                pass

        ## Remaining code in this function is unchanged from POX's stock component l2_learning.py
        if not self.transparent:
            if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered(
            ):
                drop()
                return

        if packet.dst.is_multicast:
            flood()
        else:
            if packet.dst not in self.macToPort:  # 4
                flood("Port for %s unknown -- flooding" % (packet.dst, ))  # 4a
            else:
                port = self.macToPort[packet.dst]
                if port == event.port:
                    log.warning(
                        "Same port for packet from %s -> %s on %s.%s.  Drop." %
                        (packet.src, packet.dst, dpid_to_str(
                            event.dpid), port))
                    drop(10)
                    return

                #log.debug("installing flow for %s.%i -> %s.%i" %
                #          (packet.src, event.port, packet.dst, port))
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet, event.port)
                msg.idle_timeout = 10
                msg.hard_timeout = 30
                msg.actions.append(of.ofp_action_output(port=port))
                msg.data = event.ofp  # 6a
                self.connection.send(msg)
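A note on the example above: it re-reads 'siteAhosts' and 'allowedConnections' on every PacketIn and parses them with eval(), which will execute anything written to those files. A minimal sketch of a safer loader, assuming each file holds a single Python literal on its first line (the helper name and the fallback defaults are illustrative, not part of the original component):

import ast

def load_literal(path, default):
    # Parse a Python literal (e.g. a list) from the first line of a file.
    # ast.literal_eval only accepts literals, so code in the file cannot execute.
    try:
        with open(path) as f:
            return ast.literal_eval(f.readline())
    except (IOError, ValueError, SyntaxError):
        return default

siteAhosts = load_literal('siteAhosts', [])
allowedConnections = load_literal('allowedConnections', [[]])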
Exemplo n.º 45
0
    def process_flow_stats(self, stats, reception_time):
        """Processes a FlowStats response to a FlowStatsRequest.

        Flow stats are processed to determine bandwidth utilization from the transmission side of a link. This method
        can produce inaccurate measurements when using Mininet link emulation. In particular, the statistics returned
        by emulated switches will not properly detect dropped packets, and the byte counts returned in FlowStats
        responses will overestimate the actual bytes forwarded when the link becomes congested. FlowStats are recorded
        on a per-flow basis, allowing the module to determine the percentage of link utilization contributed by each
        flow in the network. Flows are differentiated by their controller-assigned flow cookie. Flows with no cookie
        default to a cookie value of 0, and the FlowTracker module considers all such flows as a single flow with
        cookie value 0.

        An exponential moving average is used to smooth bandwidth estimates, where the alpha of the exponential average
        is set by flow_tracker.avg_smooth_factor.
        """
        if not self.is_connected:
            return

        log.debug('== FlowStatsReceived - Switch: ' + dpid_to_str(self.dpid) +
                  ' - Time: ' + str(reception_time))
        self._last_flow_stats_query_network_time = reception_time - self._last_flow_stats_query_send_time

        # Clear byte counts for this interval
        for port in self.flow_interval_byte_count:
            self.flow_interval_byte_count[port] = {}

        num_flows = {}
        for port_num in self.tracked_ports:
            num_flows[port_num] = 0

        curr_event_byte_count = {}

        # Check for new ports on the switch
        ports = self.connection.features.ports
        for port in ports:
            if port.port_no == of.OFPP_LOCAL or port.port_no == of.OFPP_CONTROLLER:
                continue

            if not port.port_no in self.tracked_ports:
                continue

            if not port.port_no in self.flow_total_byte_count:
                self.flow_total_byte_count[port.port_no] = {}
                self.flow_interval_byte_count[port.port_no] = {}
                self.flow_interval_bandwidth_Mbps[port.port_no] = {}
                self.flow_average_bandwidth_Mbps[port.port_no] = {}

        # Record the number of bytes transmitted through each port for this monitoring interval
        for flow_stat in stats:
            for action in flow_stat.actions:
                if isinstance(action, of.ofp_action_output):
                    if action.port in self.tracked_ports:
                        if flow_stat.cookie != 0:
                            num_flows[action.port] += 1

                        # log.info('Got flow on tracked port with cookie: ' + str(flow_stat.cookie))
                        if action.port in curr_event_byte_count:
                            if flow_stat.cookie in curr_event_byte_count[
                                    action.port]:
                                curr_event_byte_count[action.port][flow_stat.cookie] = \
                                        curr_event_byte_count[action.port][flow_stat.cookie] + flow_stat.byte_count
                            else:
                                curr_event_byte_count[action.port][
                                    flow_stat.cookie] = flow_stat.byte_count
                        else:
                            curr_event_byte_count[action.port] = {}
                            curr_event_byte_count[action.port][
                                flow_stat.cookie] = flow_stat.byte_count

        # Determine the number of new bytes that appeared this interval
        negative_byte_count = False
        for port_num in curr_event_byte_count:
            if port_num in self.tracked_ports:
                if not port_num in self.flow_total_byte_count:
                    # Port has never appeared before
                    self.flow_total_byte_count[port_num] = {}
                    self.flow_interval_byte_count[port_num] = {}
                    for flow_cookie in curr_event_byte_count[port_num]:
                        self.flow_total_byte_count[port_num][
                            flow_cookie] = curr_event_byte_count[port_num][
                                flow_cookie]
                        self.flow_interval_byte_count[port_num][
                            flow_cookie] = curr_event_byte_count[port_num][
                                flow_cookie]
                else:
                    for flow_cookie in curr_event_byte_count[port_num]:
                        if flow_cookie not in self.flow_total_byte_count[
                                port_num]:
                            # Flow has not appeared before
                            self.flow_total_byte_count[port_num][
                                flow_cookie] = curr_event_byte_count[port_num][
                                    flow_cookie]
                            self.flow_interval_byte_count[port_num][
                                flow_cookie] = curr_event_byte_count[port_num][
                                    flow_cookie]
                        else:
                            if curr_event_byte_count[port_num][
                                    flow_cookie] < self.flow_total_byte_count[
                                        port_num][flow_cookie]:
                                # TODO: Find a better way to handle the case where a flow reports fewer bytes forwarded than in the previous interval
                                log.info(
                                    'Switch: ' + dpid_to_str(self.dpid) +
                                    ' Port: ' + str(port_num) +
                                    ' FlowCookie: ' + str(flow_cookie) +
                                    '\n\tReported negative byte count: ' +
                                    str(curr_event_byte_count[port_num]
                                        [flow_cookie] -
                                        self.flow_total_byte_count[port_num]
                                        [flow_cookie]))
                                self.flow_total_byte_count[port_num][
                                    flow_cookie] = curr_event_byte_count[
                                        port_num][flow_cookie]
                                self.flow_interval_byte_count[port_num][
                                    flow_cookie] = 0
                                negative_byte_count = True
                            else:
                                self.flow_interval_byte_count[port_num][
                                    flow_cookie] = (
                                        curr_event_byte_count[port_num]
                                        [flow_cookie] -
                                        self.flow_total_byte_count[port_num]
                                        [flow_cookie])
                                self.flow_total_byte_count[port_num][
                                    flow_cookie] = curr_event_byte_count[
                                        port_num][flow_cookie]

        # Remove counters for flows that were removed in this interval
        flows_to_remove = []
        for port_num in self.flow_total_byte_count:
            for flow_cookie in self.flow_total_byte_count[port_num]:
                if port_num not in curr_event_byte_count or flow_cookie not in curr_event_byte_count[
                        port_num]:
                    flows_to_remove.append((port_num, flow_cookie))
        for removal in flows_to_remove:
            log.debug('Removing bandwidth counters for port: ' +
                      str(removal[0]) + ' flow cookie: ' + str(removal[1]))
            if removal[1] in self.flow_interval_byte_count[removal[0]]:
                del self.flow_interval_byte_count[removal[0]][removal[1]]
            if removal[1] in self.flow_total_byte_count[removal[0]]:
                del self.flow_total_byte_count[removal[0]][removal[1]]
            if removal[1] in self.flow_interval_bandwidth_Mbps[removal[0]]:
                del self.flow_interval_bandwidth_Mbps[removal[0]][removal[1]]
            if removal[1] in self.flow_average_bandwidth_Mbps[removal[0]]:
                del self.flow_average_bandwidth_Mbps[removal[0]][removal[1]]

        # Skip further processing if this was the first measurement interval, or if the measurement interval had an unreasonable duration
        if negative_byte_count or self._last_flow_stats_query_response_time is None:
            self._last_flow_stats_query_response_time = reception_time
            return
        interval_len = reception_time - self._last_flow_stats_query_response_time
        if interval_len < (
                0.5 * self.flow_tracker.periodic_query_interval_seconds
        ) or interval_len > (
                2 * self.flow_tracker.periodic_query_interval_seconds):
            self._last_flow_stats_query_response_time = reception_time
            return

        # Update bandwidth estimates
        self.flow_total_average_bandwidth_Mbps = {}
        for port_num in self.flow_interval_byte_count:
            if port_num not in self.flow_interval_bandwidth_Mbps:
                self.flow_interval_bandwidth_Mbps[port_num] = {}
                self.flow_average_bandwidth_Mbps[port_num] = {}

            for flow_cookie in self.flow_interval_byte_count[port_num]:
                if flow_cookie not in self.flow_interval_bandwidth_Mbps[
                        port_num]:
                    self.flow_interval_bandwidth_Mbps[port_num][
                        flow_cookie] = 0
                    self.flow_average_bandwidth_Mbps[port_num][flow_cookie] = 0

                # Update instant bandwidth - Note that this is capped at 5% above the link's maximum supported bandwidth
                self.flow_interval_bandwidth_Mbps[port_num][flow_cookie] = \
                        min(((self.flow_interval_byte_count[port_num][flow_cookie] * 8.0) / 1048576.0) / (interval_len),
                        self.flow_tracker.link_max_bw * 1.05)

                # Update running average bandwidth
                self.flow_average_bandwidth_Mbps[port_num][flow_cookie] = \
                    (self.flow_tracker.avg_smooth_factor * self.flow_interval_bandwidth_Mbps[port_num][flow_cookie]) + \
                    ((1 - self.flow_tracker.avg_smooth_factor) * self.flow_average_bandwidth_Mbps[port_num][flow_cookie])

                if (self.flow_average_bandwidth_Mbps[port_num][flow_cookie] <
                        0):
                    log.warn('FlowStats reported negative bandwidth (' +
                             str(self.flow_average_bandwidth_Mbps[port_num]
                                 [flow_cookie]) + ' Mbps) ' +
                             'on \n\tSwitch: ' + dpid_to_str(self.dpid) +
                             ' Port: ' + str(port_num) + ' Flow Cookie: ' +
                             str(flow_cookie) + '\n\tInterval Len: ' +
                             str(interval_len))

            self.flow_total_average_bandwidth_Mbps[port_num] = sum(
                self.flow_average_bandwidth_Mbps[port_num].itervalues())

        flow_average_switch_load = 0
        for port_num in self.flow_average_bandwidth_Mbps:
            for flow_cookie in self.flow_average_bandwidth_Mbps[port_num]:
                flow_average_switch_load += self.flow_average_bandwidth_Mbps[
                    port_num][flow_cookie]
        self.flow_average_switch_load = flow_average_switch_load

        # Update last response time
        complete_processing_time = time.time()
        self._last_flow_stats_query_processing_time = complete_processing_time - reception_time
        self._last_flow_stats_query_total_time = complete_processing_time - self._last_flow_stats_query_send_time

        # Print log information to file
        self.num_flows = num_flows
        if self.flow_tracker._log_file is not None:
            self.flow_tracker._log_file.write(
                'FlowStats Switch:' + dpid_to_str(self.dpid) + ' NumFlows:' +
                str(sum(self.num_flows.values())) + ' IntervalLen:' +
                str(interval_len) + ' IntervalEndTime:' + str(reception_time) +
                ' ResponseTime:' +
                str(self._last_flow_stats_query_total_time) + ' NetworkTime:' +
                str(self._last_flow_stats_query_network_time) +
                ' ProcessingTime:' +
                str(self._last_flow_stats_query_processing_time) +
                ' AvgSwitchLoad:' + str(self.flow_average_switch_load) + '\n')

            for port_num in self.flow_interval_bandwidth_Mbps:
                for flow_cookie in self.flow_interval_bandwidth_Mbps[port_num]:
                    self.flow_tracker._log_file.write(
                        'FSPort:' + str(port_num) + ' FlowCookie: ' +
                        str(flow_cookie) + ' BytesThisInterval:' +
                        str(self.flow_interval_byte_count[port_num]
                            [flow_cookie]) + ' InstBandwidth:' +
                        str(self.flow_interval_bandwidth_Mbps[port_num]
                            [flow_cookie]) + ' AvgBandwidth:' +
                        str(self.flow_average_bandwidth_Mbps[port_num]
                            [flow_cookie]) + '\n')

                link_util_Mbps = self.flow_tracker.get_link_utilization_mbps(
                    self.dpid, port_num)
                # Generate an event if the link is congested
                if link_util_Mbps >= self.flow_tracker.link_cong_threshold:
                    event = LinkUtilizationEvent(
                        self.dpid, port_num,
                        self.flow_tracker.link_cong_threshold, link_util_Mbps,
                        LinkUtilizationEvent.FLOW_STATS,
                        self.flow_average_bandwidth_Mbps[port_num])
                    self.flow_tracker.raiseEvent(event)

                # Log to console if a link is fully utilized
                if link_util_Mbps >= self.flow_tracker.link_max_bw:
                    # Get the DPID of the switch on the other side of the link
                    receive_switch_dpid = None
                    for link in core.openflow_discovery.adjacency:
                        if link.dpid1 == self.dpid and link.port1 == port_num:
                            receive_switch_dpid = link.dpid2
                            break

                    # Calculate the minimum node degree of the two switches
                    min_node_degree = min(
                        len(self.tracked_ports),
                        len(self.flow_tracker.switches[receive_switch_dpid].
                            tracked_ports))

                    log.warn(
                        'FlowStats: Fully utilized link detected! SendSw:' +
                        dpid_to_str(self.dpid) + ' Port:' + str(port_num) +
                        ' MinNodeDegree:' + str(min_node_degree) +
                        ' UtilMbps:' + str(link_util_Mbps))

            self.flow_tracker._log_file.write('\n')

        self._last_flow_stats_query_response_time = reception_time
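The per-flow accounting in process_flow_stats above boils down to keeping a cumulative byte total per (port, cookie), taking the difference each interval, resetting whenever a counter goes backwards, and forgetting flows that disappear. A standalone sketch of that bookkeeping (the function and variable names are illustrative, not from the FlowTracker module):

def update_byte_deltas(totals, current):
    # totals and current both map (port, cookie) -> cumulative byte count.
    # Returns the per-flow byte deltas for this interval; updates totals in place.
    deltas = {}
    for key, count in current.items():
        prev = totals.get(key)
        if prev is None:
            deltas[key] = count      # first time this flow is seen
        elif count < prev:
            deltas[key] = 0          # counter went backwards (e.g. flow re-installed)
        else:
            deltas[key] = count - prev
        totals[key] = count
    # Forget flows that did not report in this interval
    for key in list(totals):
        if key not in current:
            del totals[key]
    return deltas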
    def connect(self, connection):
        if connection is None:
            self.log.warn("Can't connect to nothing")
            return
        if self.dpid is None:
            self.dpid = connection.dpid
        assert self.dpid == connection.dpid
        if self.ports is None:
            self.ports = connection.features.ports
        self.disconnect()
        self.connection = connection
        self._listeners = self.listenTo(connection)
        self._connected_at = time.time()

        label = dpid_to_str(connection.dpid)
        self.log = log.getChild(label)
        self.log.debug("Connect %s" % (connection, ))

        if self._id is None:
            if self.dpid not in switches_by_id and self.dpid <= 254:
                self._id = self.dpid
            else:
                self._id = TopoSwitch._next_id
                TopoSwitch._next_id += 1
            switches_by_id[self._id] = self

        self.network = IPAddr("10.%s.0.0" % (self._id, ))
        self.mac = dpid_to_mac(self.dpid)

        # Disable flooding
        con = connection
        log.debug("Disabling flooding for %i ports", len(con.ports))
        for p in con.ports.itervalues():
            if p.port_no >= of.OFPP_MAX: continue
            pm = of.ofp_port_mod(port_no=p.port_no,
                                 hw_addr=p.hw_addr,
                                 config=of.OFPPC_NO_FLOOD,
                                 mask=of.OFPPC_NO_FLOOD)
            con.send(pm)
        con.send(of.ofp_barrier_request())
        con.send(of.ofp_features_request())

        # Some of this is copied from DHCPD's __init__().
        self.send_table()

        def fix_addr(addr, backup):
            if addr is None: return None
            if addr == (): return IPAddr(backup)
            return IPAddr(addr)

        self.ip_addr = IPAddr("10.%s.0.1" % (self._id, ))
        #self.router_addr = self.ip_addr
        self.router_addr = None
        self.dns_addr = None  #fix_addr(dns_address, self.router_addr)

        self.subnet = IPAddr("255.0.0.0")
        self.pools = {}
        for p in connection.ports:
            if p < 0 or p >= of.OFPP_MAX: continue
            self.pools[p] = [
                IPAddr("10.%s.%s.%s" % (self._id, p, n))
                for n in range(1, 255)
            ]

        self.lease_time = 60 * 60  # An hour
        #TODO: Actually make them expire :)

        self.offers = {}  # Eth -> IP we offered
        self.leases = {}  # Eth -> IP we leased
 def __repr__(self):
     try:
         return "[%s/%s]" % (dpid_to_str(self.connection.dpid), self._id)
     except:
         return "[Unknown]"
Exemplo n.º 48
0
    def _handle_PacketIn(self, event):
        """
        Handle packet in messages from the switch to implement above algorithm.
        """

        packet = event.parsed

        def flood(message=None):
            """ Floods the packet """
            msg = of.ofp_packet_out()
            if time.time() - self.connection.connect_time >= _flood_delay:
                # Only flood if we've been connected for a little while...

                if self.hold_down_expired is False:
                    # Oh yes it is!
                    self.hold_down_expired = True
                    log.info("%s: Flood hold-down expired -- flooding",
                             dpid_to_str(event.dpid))

                if message is not None:
                    log.debug(message)
                #log.debug("%i: flood %s -> %s", event.dpid,packet.src,packet.dst)
                # OFPP_FLOOD is optional; on some switches you may need to change
                # this to OFPP_ALL.
                msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
            else:
                pass
                #log.info("Holding down flood for %s", dpid_to_str(event.dpid))
            msg.data = event.ofp
            msg.in_port = event.port
            self.connection.send(msg)

        def drop(duration=None):
            """
            Drops this packet and optionally installs a flow to continue
            dropping similar ones for a while
            """
            if duration is not None:
                if not isinstance(duration, tuple):
                    duration = (duration, duration)
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet)
                msg.idle_timeout = duration[0]
                msg.hard_timeout = duration[1]
                msg.buffer_id = event.ofp.buffer_id
                self.connection.send(msg)
            elif event.ofp.buffer_id is not None:
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                msg.in_port = event.port
                self.connection.send(msg)

        self.macToPort[packet.src] = event.port  # 1

        if not self.transparent:  # 2
            if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered(
            ):
                drop()  # 2a
                return

        if packet.dst.is_multicast:
            flood()  # 3a
        else:
            if packet.dst not in self.macToPort:  # 4
                flood("Port for %s unknown -- flooding" % (packet.dst, ))  # 4a
            else:
                port = self.macToPort[packet.dst]
                print("self.macToPort: ")
                print(self.macToPort)
                # print("packet: ")
                # print(packet)
                if port == event.port:  # 5
                    # 5a
                    log.warning(
                        "Same port for packet from %s -> %s on %s.%s.  Drop." %
                        (packet.src, packet.dst, dpid_to_str(
                            event.dpid), port))
                    drop(10)
                    return
                # 6
                log.debug("installing flow for %s.%i -> %s.%i" %
                          (packet.src, event.port, packet.dst, port))
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet, event.port)
                msg.idle_timeout = 10
                msg.hard_timeout = 30
                msg.actions.append(of.ofp_action_output(port=port))
                msg.data = event.ofp  # 6a
                self.connection.send(msg)
Exemplo n.º 49
0
 def __repr__(self):
     """Returns string representation of the switch's data-plane identifier."""
     return dpid_to_str(self.dpid)
Exemplo n.º 50
0
 def _handle_ConnectionDown(self, event):
     ConnectionDown(event.connection, event.dpid)
     log.info("Switch %s DOWN.", dpid_to_str(event.dpid))
Exemplo n.º 51
0
    def process_port_stats(self, stats, reception_time):
        """Processes a PortStats response to a PortStatsRequest.

        Port stats are processed to determine bandwidth utilization from the receiving side of a link. This method was
        chosen to overcome limitations in Mininet's link emulation technique, which causes FlowStats to overestimate
        the utilization of a link when a link becomes congested. PortStats should always give an accurate count of the
        bytes received on a particular port even in congestion conditions, but the utilization cannot be determined on
        a per-flow basis using PortStats messages.

        An exponential moving average is used to smooth bandwidth estimates, where the alpha of the exponential average
        is set by flow_tracker.avg_smooth_factor.
        """
        if not self.is_connected:
            return

        log.debug('== PortStatsReceived - Switch: ' + dpid_to_str(self.dpid) +
                  ' - Time: ' + str(reception_time))

        self._last_port_stats_query_network_time = reception_time - self._last_port_stats_query_send_time

        # Clear byte counts for this interval
        for port in self.port_interval_byte_count:
            self.port_interval_byte_count[port] = 0
        curr_event_byte_count = {}

        # Check for new ports on the switch
        ports = self.connection.features.ports
        # Ports added to this list will not have their bandwidth averages updated for this interval
        invalid_stat_ports = []
        for port in ports:
            if port.port_no == of.OFPP_LOCAL or port.port_no == of.OFPP_CONTROLLER:
                continue

            if not port.port_no in self.tracked_ports:
                continue

            if not port.port_no in self.port_total_byte_count:
                invalid_stat_ports.append(
                    port.port_no
                )  # Port bandwidth statistics are not updated on the first interval the port appears
                self.port_total_byte_count[port.port_no] = 0
                self.port_interval_byte_count[port.port_no] = 0
                self.port_interval_bandwidth_Mbps[port.port_no] = 0
                self.port_average_bandwidth_Mbps[port.port_no] = 0

        # Record the number of bytes transmitted through each port for this monitoring interval
        for port_stat in stats:
            if port_stat.port_no in self.tracked_ports:
                if port_stat.port_no in curr_event_byte_count:
                    curr_event_byte_count[
                        port_stat.port_no] = curr_event_byte_count[
                            port_stat.port_no] + port_stat.rx_bytes
                else:
                    curr_event_byte_count[
                        port_stat.port_no] = port_stat.rx_bytes

        # Determine the number of new bytes that appeared this interval, and mark a port's statistics as invalid
        # for this interval if its byte count is lower than in the previous interval (a flow was likely removed)
        for port_num in curr_event_byte_count:
            if port_num in self.tracked_ports:
                if not port_num in self.port_total_byte_count:
                    # Port has never appeared before
                    self.port_total_byte_count[
                        port_num] = curr_event_byte_count[port_num]
                    self.port_interval_byte_count[
                        port_num] = curr_event_byte_count[port_num]
                elif curr_event_byte_count[
                        port_num] < self.port_total_byte_count[port_num]:
                    # Byte count for this monitoring interval is less than previous interval, flow must have been removed
                    self.port_total_byte_count[
                        port_num] = curr_event_byte_count[port_num]
                    self.port_interval_byte_count[port_num] = 0
                    invalid_stat_ports.append(port_num)
                else:
                    self.port_interval_byte_count[port_num] = (
                        curr_event_byte_count[port_num] -
                        self.port_total_byte_count[port_num])
                    self.port_total_byte_count[
                        port_num] = curr_event_byte_count[port_num]

        # Skip further processing if this was the first measurement interval, or if the measurement interval had an unreasonable duration
        if self._last_port_stats_query_response_time is None:
            self._last_port_stats_query_response_time = reception_time
            return
        interval_len = reception_time - self._last_port_stats_query_response_time
        if (interval_len <
            (0.5 * self.flow_tracker.periodic_query_interval_seconds)
                or interval_len >
            (2 * self.flow_tracker.periodic_query_interval_seconds)):
            self._last_port_stats_query_response_time = reception_time
            return

        # Update bandwidth estimates for valid ports
        for port_num in self.port_interval_byte_count:
            if port_num in invalid_stat_ports:
                continue

            # Update instant bandwidth - Note that this is capped at 5% above the link's maximum supported bandwidth
            self.port_interval_bandwidth_Mbps[port_num] = min(
                ((self.port_interval_byte_count[port_num] * 8.0) / 1048576.0) /
                (interval_len), self.flow_tracker.link_max_bw * 1.05)
            # Update running average bandwidth
            if port_num in self.port_average_bandwidth_Mbps:
                self.port_average_bandwidth_Mbps[port_num] = (self.flow_tracker.avg_smooth_factor *
                                                              self.port_interval_bandwidth_Mbps[port_num]) +\
                                                             ((1 - self.flow_tracker.avg_smooth_factor) *
                                                              self.port_average_bandwidth_Mbps[port_num])
            else:
                self.port_average_bandwidth_Mbps[
                    port_num] = self.port_interval_bandwidth_Mbps[port_num]

        port_average_switch_load = 0
        for port_num in self.port_average_bandwidth_Mbps:
            port_average_switch_load += self.port_average_bandwidth_Mbps[
                port_num]
        self.port_average_switch_load = port_average_switch_load

        # Update last response time
        complete_processing_time = time.time()
        self._last_port_stats_query_processing_time = complete_processing_time - reception_time
        self._last_port_stats_query_total_time = complete_processing_time - self._last_port_stats_query_send_time

        # Print log information to file
        if self.flow_tracker._log_file is not None:
            # Note: NumFlows is only included here so that the PortStats logs will exactly match the format of FlowStats
            # (makes for easier log processing)
            self.flow_tracker._log_file.write(
                'PortStats Switch:' + dpid_to_str(self.dpid) + ' NumFlows:' +
                str(sum(self.num_flows.values())) + ' IntervalLen:' +
                str(interval_len) + ' IntervalEndTime:' + str(reception_time) +
                ' ResponseTime:' +
                str(self._last_port_stats_query_total_time) + ' NetworkTime:' +
                str(self._last_port_stats_query_network_time) +
                ' ProcessingTime:' +
                str(self._last_port_stats_query_processing_time) +
                ' AvgSwitchLoad:' + str(self.port_average_switch_load) + '\n')

            for port_num in self.port_interval_bandwidth_Mbps:
                self.flow_tracker._log_file.write(
                    'PSPort:' + str(port_num) + ' BytesThisInterval:' +
                    str(self.port_interval_byte_count[port_num]) +
                    ' InstBandwidth:' +
                    str(self.port_interval_bandwidth_Mbps[port_num]) +
                    ' AvgBandwidth:' +
                    str(self.port_average_bandwidth_Mbps[port_num]) + '\n')

                if PORT_STATS_GENERATE_LINK_EVENTS:
                    if (self.port_average_bandwidth_Mbps[port_num] >=
                        (self.flow_tracker.link_cong_threshold)):
                        # Generate an event if the link is congested
                        # First, get the switch on the other side of this link
                        send_switch_dpid = None
                        send_port = None
                        for link in core.openflow_discovery.adjacency:
                            if link.dpid1 == self.dpid and link.port1 == port_num:
                                send_switch_dpid = link.dpid2
                                send_port = link.port2
                                break

                        if send_switch_dpid is None or send_port is None:
                            continue

                        log.debug(
                            'PortStats: Congested link detected! SendSw: ' +
                            dpid_to_str(send_switch_dpid) + ' Port: ' +
                            str(send_port))
                        event = LinkUtilizationEvent(
                            send_switch_dpid, send_port,
                            self.flow_tracker.link_cong_threshold,
                            self.port_average_bandwidth_Mbps[port_num],
                            LinkUtilizationEvent.PORT_STATS,
                            self.flow_tracker.switches[send_switch_dpid].
                            flow_average_bandwidth_Mbps[port_num])
                        self.flow_tracker.raiseEvent(event)

            self.flow_tracker._log_file.write('\n')

        self._last_port_stats_query_response_time = reception_time
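Both stats handlers convert a byte delta into a bandwidth estimate the same way and cap it just above the configured link capacity. The conversion, pulled out for clarity (the function name is illustrative; since 1048576 = 2**20, the unit is strictly mebibits per second):

def interval_mbps(byte_delta, interval_seconds, link_max_bw_mbps):
    # Instantaneous bandwidth for one polling interval, capped at 5% above the
    # link's nominal maximum to absorb measurement noise.
    mbps = ((byte_delta * 8.0) / 1048576.0) / interval_seconds
    return min(mbps, link_max_bw_mbps * 1.05)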
Exemplo n.º 52
0
    def _handle_PacketIn(self, event):
        def forward(port):
            """Tell the switch to forward the packet"""
            msg = of.ofp_packet_out()
            msg.actions.append(of.ofp_action_output(port=port))
            if event.ofp.buffer_id is not None:
                msg.buffer_id = event.ofp.buffer_id
            else:
                msg.data = event.ofp.data
            msg.in_port = event.port
            self.connection.send(msg)

        def flood():
            """Tell all switches to flood the packet, remember that we disable inter-switch flooding at startup"""
            #forward(of.OFPP_FLOOD)
            for (dpid, switch) in switches.iteritems():
                msg = of.ofp_packet_out()
                if switch == self:
                    if event.ofp.buffer_id is not None:
                        msg.buffer_id = event.ofp.buffer_id
                    else:
                        msg.data = event.ofp.data
                    msg.in_port = event.port
                else:
                    msg.data = event.ofp.data
                ports = [
                    p for p in switch.connection.ports
                    if (dpid, p) not in switch_ports
                ]

                if len(ports) > 0:
                    for p in ports:
                        msg.actions.append(of.ofp_action_output(port=p))
                    switches[dpid].connection.send(msg)

        def drop():
            """Tell the switch to drop the packet"""
            if event.ofp.buffer_id is not None:  # otherwise there is nothing to drop; the packet is not buffered on the switch
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                event.ofp.buffer_id = None  # Mark as dead, copied from James McCauley, not sure what it does but it does not work otherwise
                msg.in_port = event.port
                self.connection.send(msg)

        #log.debug("Received PacketIn")
        packet = event.parsed

        SwitchPort = namedtuple('SwitchPort', 'dpid port')

        if (
                event.dpid, event.port
        ) not in switch_ports:  # only relearn locations if they arrived from non-interswitch links
            mac_learning[packet.src] = SwitchPort(
                event.dpid,
                event.port)  #relearn the location of the mac-address

        if packet.effective_ethertype == packet.LLDP_TYPE:
            drop()
            log.debug("Switch %s dropped LLDP packet", self)
        elif packet.dst.is_multicast:
            flood()
            log.debug("Switch %s flooded multicast 0x%0.4X type packet", self,
                      packet.effective_ethertype)
        elif packet.dst not in mac_learning:
            #Learn the location of the recipient before generating and installing any rules for this packet.
            #Flooding it can lead to complications if the path is learned while the flood is still traversing the network.
            flood()
            log.debug(
                "Switch %s flooded unicast 0x%0.4X type packet, due to unlearned MAC address",
                self, packet.effective_ethertype)
        elif packet.effective_ethertype == packet.ARP_TYPE:
            #These packets are sent so not-often that they don't deserve a flow
            #Instead of flooding them, we drop it at the current switch and have it resend by the switch to which the recipient is connected.
            #flood()
            drop()
            dst = mac_learning[packet.dst]
            #print dst.dpid, dst.port
            msg = of.ofp_packet_out()
            msg.data = event.ofp.data
            msg.actions.append(of.ofp_action_output(port=dst.port))
            switches[dst.dpid].connection.send(msg)
            log.debug(
                "Switch %s processed unicast ARP (0x0806) packet, send to recipient by switch %s",
                self, util.dpid_to_str(dst.dpid))
        else:
            log.debug(
                "Switch %s received PacketIn of type 0x%0.4X, received from %s.%s",
                self, packet.effective_ethertype, util.dpid_to_str(event.dpid),
                event.port)
            dst = mac_learning[packet.dst]

            self.update_matrices()
            prev_path = _get_path(self.connection.dpid, dst.dpid)

            path_nodes = map(int, str(prev_path).split(' -> '))
            hops = len(path_nodes) - 1

            if hops <= 0:
                print '*** No more available path to transmit the packet!!!'

            else:
                print 'The Shortest Path: %s' % (prev_path)

                for i in range(hops):
                    if (path_nodes[i], path_nodes[i + 1]) in link_flows_matrix:
                        print 'invalid operation!!!'

                    else:
                        # make link congested and update the timeout
                        link_flows_matrix[path_nodes[i], path_nodes[i + 1]] = 1
                        link_timeout_matrix[
                            path_nodes[i],
                            path_nodes[i + 1]] = time.time() + timeout
                        weights_topo[path_nodes[i]][path_nodes[i + 1]] = float(
                            '+inf')

                if prev_path is None:
                    flood()
                    return

                match = ofp_match_withHash.from_packet(packet)
                _install_path(prev_path, match)

                #forward the packet directly from the last switch, there is no need to have the packet run through the complete network.
                drop()
                dst = mac_learning[packet.dst]
                msg = of.ofp_packet_out()
                msg.data = event.ofp.data
                msg.actions.append(of.ofp_action_output(port=dst.port))
                switches[dst.dpid].connection.send(msg)

                self.raiseEvent(NewFlow(prev_path, match, adj))
                log.debug("Switch %s processed unicast 0x%0.4x type packet, send to recipient by switch %s",\
                    self, packet.effective_ethertype, util.dpid_to_str(dst.dpid))

            print '\nLink Flows Matrix:\n%s\n' % (link_flows_matrix)
            print 'Link Timeout Matrix:\n%s\n\n' % (link_timeout_matrix)
            print '############################################################################################################'
Exemplo n.º 53
0
def _handle_PacketIn(event):
    log.info("*** _handle_PacketIn... ***{0}, {1}".format(
        str(event.dpid), event.port))

    dpid = event.connection.dpid

    inport = event.port

    packet = event.parse()
    if not packet.parsed:
        log.warning("%i %i ignoring unparsed packet", dpid, inport)

        return

    a = packet.find('arp')

    if not a:
        if True:
            tcpp = packet.find('tcp')
            if not tcpp:
                tcpp = packet.find('udp')
                if not tcpp:
                    print "Not a good packet"
                    return
            print "finally a tcp packet"

            if dpid == 3:
                port = tcpp.srcport
                ip = tcpp.prev.srcip
                print ip
                trans_port = 0
                identifier = str(port) + "_" + str(ip)
                if identifier not in nat_trans:
                    trans_port = get_free_port(ip, port)
                else:
                    trans_port = nat_trans[identifier]

                if ip not in ip_vlan_dict:
                    ip_vlan_dict[ip] = get_free_vlan()
                    ip_vlan_reverse_dict[ip_vlan_dict[ip]] = ip
                vlan = ip_vlan_dict.get(ip, 0)
                print "About to install a flow ({0}, {1}) -> {2} came on port {3}".format(
                    str(ip), str(port), str(trans_port), str(inport))
                install_flows(event=event,
                              vlan=vlan,
                              input_ip=ip,
                              trans_port=trans_port,
                              original_port=port)
            else:
                vlan = packet.find('vlan')
                if vlan:
                    vlan_id = vlan.id
                else:
                    vlan_id = 0

                print "***************TYPE {0}".format(str(packet.type))
                print "VLAN_IDDD {0}".format(str(vlan_id))
                install_flows(event=event,
                              vlan=vlan_id,
                              input_ip=None,
                              trans_port=None,
                              original_port=None)
                return
            return
        else:
            print "Not an ipv4 packet"
    else:
        log.info("%s ARP %s %s => %s", dpid_to_str(dpid), {
            arp.REQUEST: "request",
            arp.REPLY: "reply"
        }.get(a.opcode, 'op:%i' % (a.opcode, )), str(a.protosrc),
                 str(a.protodst))

        if a.prototype == arp.PROTO_TYPE_IP:

            if a.hwtype == arp.HW_TYPE_ETHERNET:

                if a.opcode == arp.REQUEST:

                    if str(a.protodst) == "192.168.1.1":
                        r = arp()

                        r.hwtype = a.hwtype

                        r.prototype = a.prototype

                        r.hwlen = a.hwlen

                        r.protolen = a.protolen

                        r.opcode = arp.REPLY

                        r.hwdst = a.hwsrc

                        r.protodst = a.protosrc
                        r.protosrc = a.protodst

                        r.hwsrc = EthAddr("00:00:00:00:00:03")

                        e = ethernet(type=packet.type,
                                     src=r.hwsrc,
                                     dst=a.hwsrc)

                        e.payload = r

                        log.info("%s answering ARP for %s" %
                                 (dpid_to_str(dpid), str(r.protosrc)))

                        msg = of.ofp_packet_out()

                        msg.data = e.pack()

                        msg.actions.append(
                            of.ofp_action_output(port=of.OFPP_IN_PORT))

                        msg.in_port = inport

                        event.connection.send(msg)

                    if str(a.protodst) == "10.0.0.2":
                        r = arp()

                        r.hwtype = a.hwtype

                        r.prototype = a.prototype

                        r.hwlen = a.hwlen

                        r.protolen = a.protolen

                        r.opcode = arp.REPLY

                        r.hwdst = a.hwsrc

                        r.protodst = a.protosrc

                        r.protosrc = a.protodst

                        r.hwsrc = EthAddr("00:00:00:00:00:04")

                        e = ethernet(type=packet.type,
                                     src=r.hwsrc,
                                     dst=a.hwsrc)

                        e.payload = r

                        log.info("%s answering ARP for %s" %
                                 (dpid_to_str(dpid), str(r.protosrc)))

                        msg = of.ofp_packet_out()

                        msg.data = e.pack()

                        msg.actions.append(
                            of.ofp_action_output(port=of.OFPP_IN_PORT))

                        msg.in_port = inport

                        event.connection.send(msg)
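The two ARP-reply branches above differ only in the address being impersonated and the spoofed source MAC. A sketch of how that construction could be factored into a helper, assuming POX's pox.lib.packet classes (the helper name is illustrative):

from pox.lib.addresses import EthAddr
from pox.lib.packet.arp import arp
from pox.lib.packet.ethernet import ethernet
import pox.openflow.libopenflow_01 as of

def make_proxy_arp_reply(a, eth_type, spoofed_mac, inport):
    # Build an ofp_packet_out carrying an ARP reply to request 'a',
    # answering on behalf of a.protodst with spoofed_mac.
    r = arp()
    r.hwtype = a.hwtype
    r.prototype = a.prototype
    r.hwlen = a.hwlen
    r.protolen = a.protolen
    r.opcode = arp.REPLY
    r.hwdst = a.hwsrc
    r.protodst = a.protosrc
    r.protosrc = a.protodst
    r.hwsrc = EthAddr(spoofed_mac)
    e = ethernet(type=eth_type, src=r.hwsrc, dst=a.hwsrc)
    e.payload = r
    msg = of.ofp_packet_out(data=e.pack())
    msg.actions.append(of.ofp_action_output(port=of.OFPP_IN_PORT))
    msg.in_port = inport
    return msg

Each branch would then reduce to event.connection.send(make_proxy_arp_reply(a, packet.type, "00:00:00:00:00:03", inport)) with the MAC appropriate to the requested address.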
Exemplo n.º 54
0
 def __str__(self):
     return "%s.%s -> %s.%s" % (dpid_to_str(
         self[0]), self[1], dpid_to_str(self[2]), self[3])
Exemplo n.º 55
0
 def __repr__(self):
     return util.dpid_to_str(self.connection.dpid)
Exemplo n.º 56
0
def _handle_ConnectionUp(event):
    # Initialize the forwarding rules for this switch.
    # After setting up, we send a barrier and wait for the response
    # before starting to listen to packet_ins for this switch -- before
    # the switch is set up, the packet_ins may not be what we expect,
    # and our responses may not work!

    connection = event.connection
    dpid = connection.dpid
    print "handle_ConnectionUP from dpid", dpid, util.dpid_to_str(dpid)
    portlist = connection.ports.values()
    # get port_no of each item in portlist
    portlist = map(lambda x: x.port_no, portlist)
    portlist = filter(lambda x: x < of.OFPP_MAX, portlist)
    # Turn on Nicira packet_ins
    msg = nx.nx_packet_in_format()
    connection.send(msg)
    # Turn on this switch's ability to specify tables in flow_mods
    msg = nx.nx_flow_mod_table_id()
    connection.send(msg)
    # Clear first table
    msg = nx.nx_flow_mod(command=of.OFPFC_DELETE, table_id=0)
    connection.send(msg)
    # Clear second table
    msg = nx.nx_flow_mod(command=of.OFPFC_DELETE, table_id=1)
    connection.send(msg)

    # this version sets default flooding actions only for ICMP and ARP packets
    # (though there IS a rule to send unknown packets to the controller)
    # Default rule for table 0: flood (IF a flooder) and send to table 1
    # Default rule for table 1: send to controller
    # Default rule for table 0 starts here
    msgi = nx.nx_flow_mod()  # icmp msg
    msga = nx.nx_flow_mod()  # arp msg
    msgi.table_id = msga.table_id = 0
    msgi.priority = msga.priority = 1  # Low priority
    msgi.idle_timeout = msga.idle_timeout = ICMP_IDLE_TIMEOUT

    msgi.match.append(nx.NXM_OF_ETH_TYPE(pkt.ethernet.IP_TYPE))
    msgi.match.append(nx.NXM_OF_IP_PROTO(pkt.ipv4.ICMP_PROTOCOL))
    msga.match.append(nx.NXM_OF_ETH_TYPE(pkt.ethernet.ARP_TYPE))

    if flooder(dpid):
        msgi.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
        msga.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
    msgi.actions.append(nx.nx_action_resubmit.resubmit_table(table=1))
    msga.actions.append(nx.nx_action_resubmit.resubmit_table(table=1))
    connection.send(msgi)
    connection.send(msga)

    # Default rule for table 1: send to controller
    msgi = nx.nx_flow_mod()  # icmp msg
    msga = nx.nx_flow_mod()  # arp msg
    msgi.table_id = msga.table_id = 1
    msgi.priority = msga.priority = 1  # Low priority
    msgi.idle_timeout = msga.idle_timeout = ICMP_IDLE_TIMEOUT

    msgi.match.append(nx.NXM_OF_ETH_TYPE(pkt.ethernet.IP_TYPE))
    msgi.match.append(nx.NXM_OF_IP_PROTO(pkt.ipv4.ICMP_PROTOCOL))
    msga.match.append(nx.NXM_OF_ETH_TYPE(pkt.ethernet.ARP_TYPE))

    msgi.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
    msga.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
    connection.send(msgi)
    connection.send(msga)

    if flooder(dpid):  # create an empty-match default rule (applies mostly to TCP traffic)
        msgdef = nx.nx_flow_mod()
        msgdef.table_id = 0
        msgdef.priority = 0  # pld: MUST HAVE THIS
        msgdef.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
        connection.send(msgdef)

    def ready(event):  # called right below, as parameter
        if event.ofp.xid != 0x80000000:
            # Not the right barrier
            return
        log.info("%s ready", event.connection)
        event.connection.addListenerByName("PacketIn", _handle_PacketIn)
        return EventRemove

    # the following is to ensure that the switch does nothing else until it processes the actions above
    connection.send(of.ofp_barrier_request(xid=0x80000000))
    connection.addListenerByName("BarrierIn", ready)

    # now install switch
    if dpid in switchmap:
        sw = switchmap[dpid]
        if sw.connection() is None:
            sw.setConnection(connection)
    else:
        sw = SwitchNode(dpid, connection)
        switchmap[dpid] = sw
    # now add empty port list
    sw.setUnknownPorts(portlist)
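The barrier handshake at the end of the handler above is what guarantees the switch has applied the table setup before any PacketIn from it is processed. A condensed sketch of that pattern in isolation (names are illustrative; the xid simply has to match on both sides):

import pox.openflow.libopenflow_01 as of
from pox.lib.revent import EventRemove

SETUP_BARRIER_XID = 0x80000000

def arm_packet_in_after_setup(connection, packet_in_handler):
    def ready(event):
        if event.ofp.xid != SETUP_BARRIER_XID:
            return  # a different barrier; keep waiting
        event.connection.addListenerByName("PacketIn", packet_in_handler)
        return EventRemove  # stop listening for BarrierIn
    connection.send(of.ofp_barrier_request(xid=SETUP_BARRIER_XID))
    connection.addListenerByName("BarrierIn", ready)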
Exemplo n.º 57
0
 def _handle_ConnectionDown(self, event):
     log.debug("Switch %s going down",
               util.dpid_to_str(self.connection.dpid))
     del switches[self.connection.dpid]
Exemplo n.º 58
0
    def _handle_PacketIn(self, event):
        def flood():
            """ Floods the packet """
            if self.is_holding_down:
                log.warning("Not flooding -- holddown active")
            msg = of.ofp_packet_out()
            # OFPP_FLOOD is optional; some switches may need OFPP_ALL
            msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
            msg.buffer_id = event.ofp.buffer_id
            msg.in_port = event.port
            self.connection.send(msg)

        def drop():
            # Kill the buffer
            if event.ofp.buffer_id is not None:
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                event.ofp.buffer_id = None  # Mark it as dead
                msg.in_port = event.port
                self.connection.send(msg)

        packet = event.parsed

        loc = (self, event.port)  # Place we saw this ethaddr
        oldloc = mac_map.get(packet.src)  # Place we last saw this ethaddr

        ################################################################ Handle LLDP Type ################################################################

        if packet.effective_ethertype == packet.LLDP_TYPE:
            drop()
            return

        ##################################################################################################################################################
        ################################################################ Handle ARP Type ################################################################
        if packet.type == ethernet.ARP_TYPE:
            arppack = packet.find('arp')
            if arppack.opcode == arp.REQUEST:
                mac_map[packet.src] = loc  # Learn position for ethaddr
                log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
                for switch in dpids:
                    if switch == self.dpid:
                        if swdebug:
                            print "Same switch"
                        continue
                    if swdebug:
                        print "Sending ARP REQ to", dpidToStr(switch)
                    action_out = [
                        of.ofp_action_output(port=port)
                        for port in host_ports[switch]
                    ]
                    core.openflow.sendToDPID(
                        switch,
                        of.ofp_packet_out(data=packet.pack(),
                                          action=action_out))
                return
            if arppack.opcode == arp.REPLY:
                mac_map[packet.src] = loc
                loc_dst = mac_map[packet.dst]  # Learn position for ethaddr
                if swdebug:
                    print "Send reply to DPID - ", str(
                        loc_dst[0]), "port -", loc_dst[1]
                if swdebug:
                    print "Type - ", type(str(loc_dst[0]))
                action_out = of.ofp_action_output(port=loc_dst[1])
                core.openflow.sendToDPID(
                    strToDPID(str(loc_dst[0])),
                    of.ofp_packet_out(data=packet.pack(), action=action_out))
                return

        ################################################################# Handle LAT Type ################################################################

        if packet.effective_ethertype == LAT_TYPE:
            """
      Handle incoming latency packets
      """
            #print dpidToStr(event.dpid)
            port = packet.src
            [prevtime, port_mac, swdpdest, swdpsrc] = packet.payload.split(',')
            prevtime = float(prevtime)
            currtime = time.time()
            #print "PrevTime = ", prevtime, "    CurrTime = ", currtime
            dest_dpid = dpidToStr(event.dpid)
            if dest_dpid == swdpdest:
                #print "DPID matched"
                latency = round((((currtime - prevtime) * 1000) -
                                 dpid_latency[strToDPID(swdpsrc)] -
                                 dpid_latency[event.dpid]), 4)
                #print "Latency =",latency
                #swd = ports[dpidToStr(self.dpid)]
                swd = ports[swdpsrc]
                for k in swd:
                    if swd[k][4] == port_mac:
                        break
                if latency >= 0:
                    if k in ports[swdpsrc]:
                        ports[swdpsrc][k][0] = latency
            return
        ##################################################################################################################################################

        if oldloc is None:
            if packet.src.is_multicast == False:
                mac_map[packet.src] = loc  # Learn position for ethaddr
                log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
        elif oldloc != loc:
            drop()
            return
            # ethaddr seen at different place!
            if core.openflow_discovery.is_edge_port(loc[0].dpid, loc[1]):
                # New place is another "plain" port (probably)
                log.debug("%s moved from %s.%i to %s.%i?", packet.src,
                          dpid_to_str(oldloc[0].dpid), oldloc[1],
                          dpid_to_str(loc[0].dpid), loc[1])
                if packet.src.is_multicast == False:
                    mac_map[packet.src] = loc  # Learn position for ethaddr
                    log.debug("Learned %s at %s.%i", packet.src, loc[0],
                              loc[1])
            elif packet.dst.is_multicast == False:
                # New place is a switch-to-switch port!
                # Hopefully, this is a packet we're flooding because we didn't
                # know the destination, and not because it's somehow not on a
                # path that we expect it to be on.
                # If spanning_tree is running, we might check that this port is
                # on the spanning tree (it should be).
                if packet.dst in mac_map:
                    # Unfortunately, we know the destination.  It's possible that
                    # we learned it while it was in flight, but it's also possible
                    # that something has gone wrong.
                    if swdebug:
                        print "Hit MacMap"
                    log.warning(
                        "Packet from %s to known destination %s arrived "
                        "at %s.%i without flow", packet.src, packet.dst,
                        dpid_to_str(self.dpid), event.port)

        if packet.dst.is_multicast:
            log.debug("Flood multicast from %s", packet.src)
            flood()
        else:
            if packet.dst not in mac_map:
                log.debug("%s unknown -- flooding" % (packet.dst, ))
                flood()
            else:
                dest = mac_map[packet.dst]
                tos = 0  # default ToS for packets without an IPv4 header
                if packet.type == ethernet.IP_TYPE:
                    ipv4_packet = packet.find("ipv4")
                    tos = ipv4_packet.tos
                    if packet.find("icmp"):
                        print "ICMP packet received"
                    print "Received ToS = ", tos
                match = of.ofp_match.from_packet(packet)
                self.install_path(dest[0], dest[1], match, event, tos)
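
The handler above only parses latency probes; the sender side is not shown in this example. The sketch below is a minimal, assumed illustration of how such a probe could be emitted: it takes LAT_TYPE to be the custom ethertype checked above, builds a payload in the "timestamp,port_mac,dest_dpid,src_dpid" format the handler splits apart, and the function name, parameters, and broadcast destination MAC are hypothetical choices introduced only for this sketch.

import time

import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
from pox.lib.addresses import EthAddr
from pox.lib.util import dpid_to_str

def send_latency_probe(connection, out_port, port_mac, dest_dpid_str):
    # Build a raw Ethernet frame with the custom LAT_TYPE ethertype
    # (assumed to be defined by the example) and a comma-separated payload
    # matching what the handler above expects.
    e = pkt.ethernet()
    e.type = LAT_TYPE
    e.src = EthAddr(port_mac)
    e.dst = EthAddr("ff:ff:ff:ff:ff:ff")  # placeholder destination for the probe
    e.payload = "%f,%s,%s,%s" % (time.time(), port_mac,
                                 dest_dpid_str, dpid_to_str(connection.dpid))
    # Push the probe out of the chosen physical port of the source switch.
    msg = of.ofp_packet_out()
    msg.data = e.pack()
    msg.actions.append(of.ofp_action_output(port=out_port))
    connection.send(msg)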
Exemplo n.º 59
0
    def _handle_PacketIn(self, event):
        # Note: arp.hwsrc is not necessarily equal to ethernet.src
        # (one such example are arp replies generated by this module itself
        # as ethernet mac is set to switch dpid) so we should be careful
        # to use only arp addresses in the learning code!
        squelch = False

        dpid = event.connection.dpid
        inport = event.port
        packet = event.parsed
        if not packet.parsed:
            log.warning("%s: ignoring unparsed packet", dpid_to_str(dpid))
            return

        a = packet.find('arp')
        if not a: return

        log.debug("%s ARP %s %s => %s", dpid_to_str(dpid), {
            arp.REQUEST: "request",
            arp.REPLY: "reply"
        }.get(a.opcode, 'op:%i' % (a.opcode, )), str(a.protosrc),
                  str(a.protodst))

        if a.prototype == arp.PROTO_TYPE_IP:
            if a.hwtype == arp.HW_TYPE_ETHERNET:
                if a.protosrc != 0:

                    if _learn:
                        # Learn or update port/MAC info
                        if a.protosrc in _arp_table:
                            if _arp_table[a.protosrc] != a.hwsrc:
                                log.warn("%s RE-learned %s: %s->%s",
                                         dpid_to_str(dpid), a.protosrc,
                                         _arp_table[a.protosrc].mac, a.hwsrc)
                        else:
                            log.info("%s learned %s", dpid_to_str(dpid),
                                     a.protosrc)
                        _arp_table[a.protosrc] = Entry(a.hwsrc)

                    if a.opcode == arp.REQUEST:
                        # Maybe we can answer

                        if a.protodst in _arp_table:
                            # We have an answer...

                            r = arp()
                            r.hwtype = a.hwtype
                            r.prototype = a.prototype
                            r.hwlen = a.hwlen
                            r.protolen = a.protolen
                            r.opcode = arp.REPLY
                            r.hwdst = a.hwsrc
                            r.protodst = a.protosrc
                            r.protosrc = a.protodst
                            mac = _arp_table[a.protodst].mac
                            if mac is True:
                                # Special case -- use ourself
                                mac = _dpid_to_mac(dpid)
                            r.hwsrc = mac
                            e = ethernet(type=packet.type,
                                         src=_dpid_to_mac(dpid),
                                         dst=a.hwsrc)
                            e.payload = r
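                            # If the request came in VLAN-tagged, wrap the
                            # reply in the same VLAN tag (same id and pcp).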
                            if packet.type == ethernet.VLAN_TYPE:
                                v_rcv = packet.find('vlan')
                                e.payload = vlan(eth_type=e.type,
                                                 payload=e.payload,
                                                 id=v_rcv.id,
                                                 pcp=v_rcv.pcp)
                                e.type = ethernet.VLAN_TYPE
                            log.info("%s answering ARP for %s" %
                                     (dpid_to_str(dpid), str(r.protosrc)))
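                            # Send the reply back out the ingress port;
                            # OFPP_IN_PORT requires msg.in_port to be set.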
                            msg = of.ofp_packet_out()
                            msg.data = e.pack()
                            msg.actions.append(
                                of.ofp_action_output(port=of.OFPP_IN_PORT))
                            msg.in_port = inport
                            event.connection.send(msg)
                            return EventHalt if _eat_packets else None
                        else:
                            # Keep track of failed queries
                            squelch = a.protodst in _failed_queries
                            _failed_queries[a.protodst] = time.time()

        if self._check_for_flood(dpid, a):
            # Didn't know how to handle this ARP, so just flood it
            msg = "%s flooding ARP %s %s => %s" % (dpid_to_str(dpid), {
                arp.REQUEST: "request",
                arp.REPLY: "reply"
            }.get(a.opcode, 'op:%i' % (a.opcode, )), a.protosrc, a.protodst)

            if squelch:
                log.debug(msg)
            else:
                log.info(msg)

            msg = of.ofp_packet_out()
            msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
            msg.data = event.ofp
            event.connection.send(msg.pack())

        return EventHalt if _eat_packets else None
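
The ARP handler above references several module-level names (_learn, _eat_packets, _arp_table, _failed_queries, Entry, _dpid_to_mac) that are defined elsewhere in the module; the self._check_for_flood helper is a method of the enclosing class and is not sketched here. A plausible minimal shape for the module-level pieces, inferred from how the handler uses them rather than taken from the example itself, is:

import time
from pox.lib.addresses import EthAddr

_learn = True            # learn IP-to-MAC bindings from observed ARP traffic
_eat_packets = True      # return EventHalt for packets this module handled
_arp_table = {}          # IPAddr -> Entry
_failed_queries = {}     # IPAddr -> time.time() of the last unanswered request

class Entry (object):
    def __init__ (self, mac):
        # mac is an EthAddr, or True meaning "answer with the switch's own MAC"
        self.mac = mac

def _dpid_to_mac (dpid):
    # Derive a MAC address for the switch from the low 48 bits of its dpid.
    return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))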
Exemplo n.º 60
0
    def __repr__(self):
        return dpid_to_str(self.dpid)