Example #1
File: stplib.py Project: Aries-Sushi/ryu
 def _update_wait_bpdu_timer(self):
     if self.wait_timer_event is not None:
         self.wait_timer_event.set()
         self.wait_timer_event = None
         self.logger.debug('[port=%d] Wait BPDU timer is updated.',
                           self.ofport.port_no, extra=self.dpid_str)
     hub.sleep(0)  # For thread switching.
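
The hub.sleep(0) call above is an explicit cooperative yield: ryu's hub wraps eventlet greenthreads, which only switch at blocking calls, so a zero-second sleep gives other greenthreads a chance to run. A minimal, self-contained sketch (hypothetical, not taken from the project above) of how two greenthreads interleave when each yields this way:

from ryu.lib import hub

def _worker(name, results):
    for i in range(3):
        results.append((name, i))
        hub.sleep(0)  # zero-second sleep = yield to the other greenthread

results = []
threads = [hub.spawn(_worker, 'a', results), hub.spawn(_worker, 'b', results)]
hub.joinall(threads)
print(results)  # entries from 'a' and 'b' alternate because each loop iteration yields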
Example #2
File: stplib.py Project: Aries-Sushi/ryu
    def _transmit_bpdu(self):
        while True:
            # Send config BPDU packet if port role is DESIGNATED_PORT.
            if self.role == DESIGNATED_PORT:
                now = datetime.datetime.today()
                if self.send_tc_timer and self.send_tc_timer < now:
                    self.send_tc_timer = None
                    self.send_tc_flg = False

                if not self.send_tc_flg:
                    flags = 0b00000000
                    log_msg = '[port=%d] Send Config BPDU.'
                else:
                    flags = 0b00000001
                    log_msg = '[port=%d] Send TopologyChange BPDU.'
                bpdu_data = self._generate_config_bpdu(flags)
                self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data)
                self.logger.debug(log_msg, self.ofport.port_no,
                                  extra=self.dpid_str)

            # Send Topology Change Notification BPDU until receive Ack.
            if self.send_tcn_flg:
                bpdu_data = self._generate_tcn_bpdu()
                self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data)
                self.logger.debug('[port=%d] Send TopologyChangeNotify BPDU.',
                                  self.ofport.port_no, extra=self.dpid_str)

            hub.sleep(self.port_times.hello_time)
Example #3
 def _send_echo_request(self):
     for datapath in self.datapaths.values():
         parser = datapath.ofproto_parser
         echo_req = parser.OFPEchoRequest(datapath,
                                          data="%.12f" % time.time())
         datapath.send_msg(echo_req)
         hub.sleep(random.randint(0,10))
Example #4
File: stplib.py Project: Aries-Sushi/ryu
    def _change_status(self, new_state, thread_switch=True):
        if new_state is not PORT_STATE_DISABLE:
            self.ofctl.set_port_status(self.ofport, new_state)

        if(new_state is PORT_STATE_FORWARD or
                (self.state is PORT_STATE_FORWARD and
                    (new_state is PORT_STATE_DISABLE or
                     new_state is PORT_STATE_BLOCK))):
            self.topology_change_notify(new_state)

        if (new_state is PORT_STATE_DISABLE
                or new_state is PORT_STATE_BLOCK):
            self.send_tc_flg = False
            self.send_tc_timer = None
            self.send_tcn_flg = False
            self.send_bpdu_thread.stop()
        elif new_state is PORT_STATE_LISTEN:
            self.send_bpdu_thread.start()

        self.state = new_state
        self.send_event(EventPortStateChange(self.dp, self))

        if self.state_event is not None:
            self.state_event.set()
            self.state_event = None
        if thread_switch:
            hub.sleep(0)  # For thread switching.
Example #5
 def barrier_reply_handler(cls, msg):
     datapath = msg.datapath
     if cls._SENDER != None and\
         datapath.id == cls._SENDER.get_dpid():
         while not isinstance(cls._WAITER, hub.Event):
             hub.sleep(1)
         cls._WAITER.set()
Example #6
    def send_mldquey_regularly(self):
        self.logger.debug("")
        mc_service_info_list = []
        for line in open(self.MULTICAST_SERVICE_INFO, "r"):
            if line[0] == "#":
                continue
            else:
                # multicast_addr, srcip_addr
                column = list(line[:-1].split(","))
                mc_service_info_list.append(column)
        self.logger.debug(
            "send address(multicast_addr, srcip_addr) : %s",
            str(mc_service_info_list))

        while True:
            for mc_service_info in mc_service_info_list:
                ip_addr_list = []
                if not mc_service_info[1] == "":
                    ip_addr_list.append(mc_service_info[1])
                mld = self.create_mldquery(
                    mc_service_info[0], ip_addr_list)
                sendpkt = self.create_packet(
                    self.addressinfo[0], self.addressinfo[1],
                    self.addressinfo[2], self.addressinfo[3], mld)
                self.send_packet_to_sw(sendpkt)
                hub.sleep(self.WAIT_TIME)
Example #7
    def _transmit_tc_bpdu(self):
        """ Set send_tc_flg to send Topology Change BPDU. """
        timer = self.port_times.max_age + self.port_times.forward_delay

        self.send_tc_flg = True
        hub.sleep(timer)
        self.send_tc_flg = False
Example #8
    def network_monitor(self):
        ''' Monitors network RTT and Congestion '''

        self.logger.info('Starting monitoring sub-routine')
        # First, we get an estimation of the link benchmark_network_capacity
        # in a state where the network will be idle.
        self.benchmark_network_capacity()

        # Then we start the periodic measurement of RTT times and port
        # utilization

        counter = 0
        while True:
            if not self.topo_shape.is_empty():
                self.logger.info('Requesting port stats to '
                                 'measure utilization')
                for dpid, s in self.topo_shape.dpid_to_switch.iteritems():

                    s.port_stats_request_time = time.time()

                    # Requesting portstats to calculate controller
                    # to switch delay and congeston
                    self._request_port_stats(s)

                    # Calculating peering switches RTT (once every 10 portstats
                    # so ~10 secs)
                    if counter % 10 == 0:
                        self.send_latency_probe_packet(s)
                counter += 1

            hub.sleep(1)
Example #9
 def _monitor(self):
     while True:
         # Send the states requests to the state tables
         for dp in self.datapaths.values():
             self._request_stats(dp)
         hub.sleep(timewindow)  # Wait X seconds
         self.replies = 0
Example #10
File: tester.py Project: paisa4ever/ryu
    def _send_packet_thread(self, arg):
        """ Send several packets continuously. """
        if self.ingress_event is None or self.ingress_event._cond:
            return

        # display dots to express progress of sending packets
        if not arg['thread_counter'] % arg['dot_span']:
            sys.stdout.write(".")
            sys.stdout.flush()

        arg['thread_counter'] += 1

        # pile up float values and
        # use integer portion as the number of packets this thread sends
        arg['packet_counter'] += arg['packet_counter_inc']
        count = int(arg['packet_counter'])
        arg['packet_counter'] -= count

        hub.sleep(CONTINUOUS_THREAD_INTVL)

        tid = hub.spawn(self._send_packet_thread, arg)
        self.ingress_threads.append(tid)
        hub.sleep(0)
        for _ in range(count):
            try:
                self.tester_sw.send_packet_out(arg['pkt_data'])
            except Exception as err:
                self.thread_msg = err
                self.ingress_event.set()
                break
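
The comments above describe a fractional accumulator: the per-thread packet rate is a float, so the fractional part is carried over between rounds and only the integer portion is sent each time. A small stand-alone sketch of that idea (a hypothetical helper, not part of tester.py):

def packets_per_round(rate, rounds):
    """Yield how many whole packets to send per round for a fractional rate."""
    acc = 0.0
    for _ in range(rounds):
        acc += rate          # pile up the float value
        count = int(acc)     # integer portion = packets to send this round
        acc -= count         # keep the remainder for later rounds
        yield count

print(list(packets_per_round(2.5, 4)))  # -> [2, 3, 2, 3]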
Example #11
File: Ryu.py Project: jonstout/SciPass
 def _balance_loop(self):
      while 1:
          self.logger.debug("Balancing")
          #--- tell the system to rebalance
          self.api.run_balancers()
          #--- sleep
          hub.sleep(self.balanceInterval)
Example #12
File: Ryu.py Project: bgeels/SciPass
 def _balance_loop(self):
      while 1:
          self.logger.error("here!!")
          #--- tell the system to rebalance
          self.api.run_balancers()
          #--- sleep
          hub.sleep(self.balanceInterval)
Example #13
    def _do_timeout_for_leave(self, timeout, datapath, dst, in_port):
        """the process when the QUERY from the switch timeout expired."""
        parser = datapath.ofproto_parser
        dpid = datapath.id

        hub.sleep(timeout)
        outport = self._to_querier[dpid]['port']

        if self._to_hosts[dpid][dst]['ports'][in_port]['out']:
            return

        del self._to_hosts[dpid][dst]['ports'][in_port]
        self._del_flow_entry(datapath, in_port, dst)
        actions = []
        ports = []
        for port in self._to_hosts[dpid][dst]['ports']:
            actions.append(parser.OFPActionOutput(port))
            ports.append(port)

        if len(actions):
            self._send_event(
                EventMulticastGroupStateChanged(
                    MG_MEMBER_CHANGED, dst, outport, ports))
            self._set_flow_entry(
                datapath, actions, outport, dst)
            self._to_hosts[dpid][dst]['leave'] = None
        else:
            self._remove_multicast_group(datapath, outport, dst)
            del self._to_hosts[dpid][dst]
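
A hedged sketch of the deferred-action pattern used above: a greenthread is spawned, sleeps for the timeout, and only cleans up if nothing happened in the meantime. All names below are made up for illustration:

from ryu.lib import hub

state = {'answered': False}

def _do_timeout(timeout):
    hub.sleep(timeout)        # wait for the query timeout to expire
    if state['answered']:     # a report arrived while we slept, nothing to do
        return
    print('timeout expired, removing the port from the group')

hub.spawn(_do_timeout, 3)
hub.sleep(1)
state['answered'] = True      # simulate the answer arriving in time
hub.sleep(3)                  # let the timer greenthread finish quietly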
Example #14
File: switches.py Project: AsmaSwapna/ryu
    def lldp_loop(self):
        while self.is_active:
            self.lldp_event.clear()

            now = time.time()
            timeout = None
            ports_now = []
            ports = []
            for (key, data) in self.ports.items():
                if data.timestamp is None:
                    ports_now.append(key)
                    continue

                expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_PORT
                if expire <= now:
                    ports.append(key)
                    continue

                timeout = expire - now
                break

            for port in ports_now:
                self.send_lldp_packet(port)
            for port in ports:
                self.send_lldp_packet(port)
                hub.sleep(self.LLDP_SEND_GUARD)      # don't burst

            if timeout is not None and ports:
                timeout = 0     # We have already slept
            # LOG.debug('lldp sleep %s', timeout)
            self.lldp_event.wait(timeout=timeout)
Example #15
    def _send_regularly(self):
        while self.loop:
            hub.sleep(1)
        datapath = self.msg.datapath
        parser = datapath.ofproto_parser
        ofproto = datapath.ofproto
        actions = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]
        
        
        src = "11:22:33:44:55:66"
        dst = "66:55:44:33:22:11"
        srcip = "11::"
        dstip= "::11"
        in_port = 1
        
        while True:
            sendpkt = self.createPacket(src, dst, srcip, dstip)
#            self.sendPacketOut(parser, datapath, in_port, actions, sendpkt.data)
            
            IPPROTO_ICMP = socket.getprotobyname('ipv6-icmp')
#            sock = socket.socket(socket.AF_INET6, socket.SOCK_RAW, IPPROTO_ICMP)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_RAW, IPPROTO_ICMP)
            self.logger.debug("******** send-before packet :\n %s\n" % (sendpkt,))
            while sendpkt.data:
                
                #sent_bytes = sock.sendto(sendpkt.data, ('ff38::1', 0, icmpv6.icmpv6(type_=icmpv6.ICMPV6_MEMBERSHIP_QUERY, data=icmpv6.mldv2_query(address='::'))))
                sent_bytes = sock.sendto(sendpkt.data, ('ff38::1', 0, 0))
                sendpkt.data = sendpkt.data[sent_bytes:]
    
            self.logger.debug("******** send packet :\n %s\n" % (sendpkt,))
            hub.sleep(self.WAIT_TIME)
Example #16
 def _measurement(self):
     while True:
         print 'ActiveFlows: ', self.active_flows
         print 'FlowRate: ', self.flow_rate
         print 'Graph: ', json.dumps(json_graph.node_link_data(self.graph))
         self._send_measure_request()
         hub.sleep(1)
Example #17
    def _monitor(self):
        i = 0
        isfirsttimecreate = True
        while True:
            self.stats['flow'] = {}
            self.stats['port'] = {}
            for dp in self.datapaths.values():
                self.port_link.setdefault(dp.id, {})
                self._request_stats(dp)
            hub.sleep(SLEEP_PERIOD)
            self.dbugfile1.write("hub sleep 10s end\n")
            self.dbugfile1.flush()
            if self.stats['flow'] or self.stats['port']:
                self.dbugfile1.write("self.stats['flow'] and self.stats['port'] is not null \n")
                #self.dbugfile1.write("self.stats['flow']"+str(self.stats['flow'])+"\n")
                #self.dbugfile1.flush()
                self.dbugfile1.write("flowspeed"+str(self.flow_speed)+"\n")
                self.dbugfile1.flush()
                self.dbugfile1.write("myflowspeed"+str(self.myflow_speed)+"\n")
                self.dbugfile1.flush()
                self.show_stat('flow', self.stats['flow'])
                self.dbugfile1.write("flow stat is show\n")
                self.dbugfile1.flush()
                self.show_stat('port', self.stats['port'])
                self.dbugfile1.write("port stat is show\n")
                self.dbugfile1.flush()
                hub.sleep(1)
            else:
                self.dbugfile1.write("self.stats['flow'] and self.stats['port'] is null \n")
                self.dbugfile1.flush()
Example #18
File: mc.py Project: LZUSDN/lzusdn02
 def _topoThread(self):
     linkNode = self.zkConf['root'] + self.zkConf['topo'] + self.ip
     if self.zk.exists(linkNode):
         self.zk.set(linkNode, json.dumps(self.links))
     else:
         self.zk.create(linkNode, json.dumps(self.links))
     hub.sleep(self.interval)
Example #19
 def _update(self):
     # wait for around 10s until all the switches have connected to the controller
     self._update_host_switch_file()
     hub.sleep(2)
     while True:
         self._update_host_switch_file()
         hub.sleep(5)
Example #20
    def _port_stats_requester(self):
        # Send a port description stats request to every switch on a fixed interval
        while True:
            for dp in self.switchports.keys():
                switch = self.switches[dp]  # Get the datapath from the dpid
                msg = parser13.OFPPortDescStatsRequest(switch)
                switch.send_msg(msg)
            hub.sleep(5)  # Sleep for five seconds
Example #21
File: tester.py Project: apobbati/ryu
 def packet_in_handler(self, ev):
     state_list = [STATE_FLOW_MATCH_CHK]
     if self.state in state_list:
         if self.waiter:
             self.rcv_msgs.append(ev.msg)
             self.waiter.set()
             hub.sleep(0)
    def _discover(self):  # periodically get topology information
        self.debugfile.write("child processing start" + "\n")
        self.debugfile.flush()
        i = 0
        is_fist_time = True
        while True:
            self.show_topology()  # show topology (topo links, link ports, access hosts)

            if is_fist_time and i == 1:
                # first fetch of the topology, one SLEEP_PERIOD after the child process starts
                self.get_topology(None)
                self.set_nodelink()
                is_fist_time = False

            if i == 2:
                # refresh the topology data and nodelink every 2 * SLEEP_PERIOD
                self.get_topology(None)
                self.set_nodelink()  # set self.nodelink
                i = 0

            hub.sleep(SLEEP_PERIOD)
            i = i + 1

        self.debugfile.write("child processing over" + "\n")
        self.debugfile.flush()
Example #23
File: send_exp.py Project: qiaosiyi/my_ryu
	def exp_main(self):
		hub.sleep(5)
		while True:
			dpid = 5
			for dpid in range(7):
				dpid+=1
				print "dpid============",dpid
				dpid_entry_num = self.activeFlows[dpid-1]#This switch's entry number
				dpid_entry_num = 62
				# print "ps:",self.P_spread(dpid_entry_num)
				# print "get the P_s",self.P_spread(64)
				if self.P_spread(dpid_entry_num) == 0:
					exp_data=[	[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1]]
					data=self.gen_exp_data(exp_data)
					print "no spread"
				
				else:
					exp_data=[	[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1],
								[0,0,0,0,0,0,0,0,1]]
					exp_data=self.calculate_exp_data(dpid_entry_num,exp_data,dpid)
					data=self.gen_exp_data(exp_data)
			hub.sleep(5)
Example #24
File: ids_main.py Project: ramprackash/ryu
 def _monitor(self):
     while True:
         tm_log = open("./ryu/app/AdaptiveIDS/tmlogs.txt", "w")
         tm_log.close()
         for dp in self.datapaths.values():
             self._request_stats(dp.datapath)
         hub.sleep(IDSCfgParams.FLOW_STATS_INTERVAL)
Example #25
    def flowGen(self):
        # wait until all switches finish initialization
        # while not self.isFinish():
        #     hub.sleep(1)
        hub.sleep(10)

        pkt = packet.Packet()
        pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
        pkt.add_protocol(ethernet.ethernet())
        pkt.add_protocol(ipv4.ipv4(dst='192.168.99.1'))
        pkt.add_protocol(icmp.icmp())
        pkt.serialize()
        data = pkt.data

        start = self.paths[0][0]
        print 'start from', start
        dp = self.datapaths[start[0]]
        ofp = dp.ofproto
        actions = [dp.ofproto_parser.OFPActionOutput(start[1])]
        while True:
            print 'one packet sent'
            out = dp.ofproto_parser.OFPPacketOut(datapath=dp, actions=actions, data=data,
                                                 in_port=ofp.OFPP_CONTROLLER, buffer_id=ofp.OFP_NO_BUFFER)
            dp.send_msg(out)
            hub.sleep(1)
Example #26
File: main.py Project: haidlir/SNHx
    def _routing(self):
        print('system is ready')
        while True:
            if Config.forwarding == 'IP':
                Collector.path = DFS.findAllPairsPath(Collector.topo)
                hub.sleep(5)

            elif Config.forwarding == 'MPLS':
                # localtime = time.asctime(time.localtime(time.time()))
                # print(localtime, 'find Paths is started')
                # start = time.time()
                # create topo
                topo = {}
                for src in Collector.topo:
                    topo[src] = {}
                    for dst in Collector.topo[src]:
                        topo[src][dst] = Collector.topo[src][dst].get_cost()

                Collector.path = AllPairsSP.main(topo)

                path = Collector.path
                datapaths = Collector.datapaths
                topo = Collector.topo
                route = Config.route
                MPLSSetup.main(path, datapaths, topo, route)
                # done = time.time()
                # print('routing calc: ', done - start)
                hub.sleep(60)
Example #27
File: client.py Project: Aries-Sushi/ryu
def transact_block(request, connection):
    """Emulate jsonrpc.Connection.transact_block without blocking eventlet.
    """
    error = connection.send(request)
    reply = None

    if error:
        return error, reply

    ovs_poller = poller.Poller()
    while not error:
        ovs_poller.immediate_wake()
        error, reply = connection.recv()

        if error != errno.EAGAIN:
            break

        if (reply and
            reply.id == request.id and
            reply.type in (jsonrpc.Message.T_REPLY,
                           jsonrpc.Message.T_ERROR)):
            break

        connection.run()
        connection.wait(ovs_poller)
        connection.recv_wait(ovs_poller)
        ovs_poller.block()

        hub.sleep(0)

    return error, reply
Example #28
File: base.py Project: Huangmachi/ryu
    def pause(self, seconds=0):
        """Relinquishes hub for given number of seconds.

        In other words is puts to sleep to give other greenthread a chance to
        run.
        """
        hub.sleep(seconds)
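
A short, self-contained usage sketch (class and method names are hypothetical, not from base.py) of how a periodic loop might call a pause()-style helper instead of hub.sleep() directly:

from ryu.lib import hub

class PollingApp(object):
    """Hypothetical app whose periodic loop relinquishes the hub via pause()."""

    def pause(self, seconds=0):
        hub.sleep(seconds)

    def poll(self, rounds):
        for i in range(rounds):
            print('collecting stats, round', i)  # stand-in for the real work
            self.pause(1)  # relinquish the hub for one second between rounds

PollingApp().poll(3)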
Example #29
    def _update(self):
        
        while True:
            for dp in self.datapaths.values():
                parser = dp.ofproto_parser
                ofproto = dp.ofproto
                self.logger.info("dpid="+str(dp.id));
                if dp.id == 161 :
                    
                    self.logger.info("Check SQL Database")
                    db = database()
                    list = db.db_getList()
                    for data in list:
                        print data["id"],"(",type(data["id"]),")"," ",data["address"],"(",type(data["address"]),")"," ",data["access"],"(",type(data["access"]),")"
                        if data["access"] == 1:
                             
                            self.logger.info("DENY "+str(data["address"]))
                            match = parser.OFPMatch(eth_type=0x0800, ipv4_src=str(data["address"]))
                            actions = {}
                            self.add_flow(dp, data["id"]+10, match, actions)

                        if data["access"] == 0:
                            self.logger.info("ALLOW "+str(data["address"]))
                            match = parser.OFPMatch(eth_type=0x0800, ipv4_src=str(data["address"]))
                            mod = parser.OFPFlowMod(dp, command=ofproto.OFPFC_DELETE, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY, match=match)
                            dp.send_msg(mod)



            hub.sleep(10)
Example #30
    def lldp_loop(self):
        """
        This function is how the topology is kept up to date.
        It will send out LLDP packets at every interval to
        update the topology.
        """
        while self.is_active:
            self.lldp_event.clear()
            now = time.time()
            timeout = None
            ports_now = []
            ports = []
            for (key, data) in self.ports.items():
                if data.timestamp is None:
                    ports_now.append(key)
                    continue

                expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_PORT
                if expire <= now:
                    ports.append(key)
                    continue

                timeout = expire - now
                break

            for port in ports_now:
                self.send_lldp_packet(port)
            for port in ports:
                self.send_lldp_packet(port)
                hub.sleep(self.LLDP_SEND_GUARD)      # don't burst
            if timeout is not None and ports:
                timeout = 0     # We have already slept
            # LOG.debug('lldp sleep %s', timeout)
            self.draw_graph(1, draw=True)
            self.lldp_event.wait(timeout=timeout)
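
Both lldp_loop variants (Examples #14 and #30) avoid busy-waiting by sleeping on a hub.Event with a timeout: the loop wakes early when another handler sets the event (for example when a port appears), and otherwise wakes when the next LLDP send is due. A minimal stand-alone sketch (hypothetical, not part of switches.py) of that wait-with-timeout pattern:

from ryu.lib import hub

wake = hub.Event()

def waiter():
    woke = wake.wait(timeout=5)  # True if set() was called, False on timeout
    print('woken early' if woke else 'timed out')

t = hub.spawn(waiter)
hub.sleep(1)
wake.set()        # wake the waiter well before the 5-second timeout
hub.joinall([t])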
Example #31
    def _recv_loop(self):
        buf = bytearray()
        count = 0
        min_read_len = remaining_read_len = ofproto_common.OFP_HEADER_SIZE

        while self.state != DEAD_DISPATCHER:
            try:
                read_len = min_read_len
                if remaining_read_len > min_read_len:
                    read_len = remaining_read_len
                ret = self.socket.recv(read_len)
            except SocketTimeout:
                continue
            except ssl.SSLError:
                # eventlet throws SSLError (which is a subclass of IOError)
                # on SSL socket read timeout; re-try the loop in this case.
                continue
            except (EOFError, IOError):
                break

            if not ret:
                break

            buf += ret
            buf_len = len(buf)
            while buf_len >= min_read_len:
                (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
                if msg_len < min_read_len:
                    # Someone isn't playing nicely; log it, and try something sane.
                    LOG.debug(
                        "Message with invalid length %s received from switch at address %s",
                        msg_len, self.address)
                    msg_len = min_read_len
                if buf_len < msg_len:
                    remaining_read_len = (msg_len - buf_len)
                    break

                msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                                         buf[:msg_len])
                # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
                if msg:
                    ev = ofp_event.ofp_msg_to_ev(msg)
                    self.ofp_brick.send_event_to_observers(ev, self.state)

                    def dispatchers(x):
                        return x.callers[ev.__class__].dispatchers

                    handlers = [
                        handler for handler in self.ofp_brick.get_handlers(ev)
                        if self.state in dispatchers(handler)
                    ]
                    for handler in handlers:
                        handler(ev)

                buf = buf[msg_len:]
                buf_len = len(buf)
                remaining_read_len = min_read_len

                # We need to schedule other greenlets. Otherwise, ryu
                # can't accept new switches or handle the existing
                # switches. The limit is arbitrary. We need the better
                # approach in the future.
                count += 1
                if count > 2048:
                    count = 0
                    hub.sleep(0)
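
The closing comment above explains why the receive loop yields: without an occasional hub.sleep(0), a busy connection would starve every other greenthread. A minimal sketch (hypothetical, not ryu's actual controller code) of that counted-yield idiom, which pays the switching cost only once every N iterations:

from ryu.lib import hub

def drain(items, yield_every=2048):
    """Process items in a tight loop, periodically yielding to other greenthreads."""
    count = 0
    for item in items:
        # ... handle one message/item here ...
        count += 1
        if count >= yield_every:
            count = 0
            hub.sleep(0)  # let other greenlets (e.g. new switch connections) run

drain(range(10000))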
Example #32
 def _monitor(self):
     while True:
         self.getsFlowvalues()
         if (len(self.udp_portdst) != 0 or len(self.tcp_portdst) != 0):  # If counters are != 0
             self.entropy_computation()
         hub.sleep(timewindow)  # Wait X seconds
Example #33
 def monitor(self):
     while True:
         self.statsReplied = 0
         for dp in self.datapaths.values():
             self.requestStats(dp)
         hub.sleep(STAT_REQUEST_PERIOD)
Example #34
    def _calc_ForwardingMatrix(self):
        while self.is_active and self.path_computation == "extended_disjoint":
            #Wait for actual topology to set
            if self.topology_update == None:
                LOG.warn(
                    "_calc_ForwardingMatrix(): Wait for actual topology to set"
                )
            #Wait for the topology to settle for 10 seconds
            elif self.forwarding_update == None and self.topology_update + timedelta(
                    seconds=10) >= datetime.now():
                LOG.warn(
                    "_calc_ForwardingMatrix(): Wait for the topology to settle for 10 seconds"
                )
            elif self.forwarding_update == None or self.topology_update > self.forwarding_update:
                LOG.warn(
                    "_calc_ForwardingMatrix(): Compute new Forwarding Matrix")
                forwarding_update_start = datetime.now()
                #Update the version of this
                self.fw = nx.extended_disjoint(
                    self.G,
                    node_disjoint=self.node_disjoint,
                    edge_then_node_disjoint=self.edge_then_node_disjoint)

                for _s in self.fw:
                    for d in self.fw[_s]:

                        nexthop = self.fw[_s][d]
                        if nexthop == None:
                            continue

                        if _s in self.G.node:
                            s = _s
                            group_id = d
                            port = self.G.edge[s][nexthop]['port']
                            dpid = s
                            dp = self.G.node[dpid]['switch'].dp
                            ofp = dp.ofproto
                            parser = dp.ofproto_parser

                            #match = parser.OFPMatch(vlan_vid=(ofp.OFPVID_PRESENT | vlan_id, ofp.OFPVID_PRESENT | 2**6-1))
                            match = parser.OFPMatch(
                                vlan_vid=(ofp.OFPVID_PRESENT | 0,
                                          ofp.OFPVID_PRESENT | 0),
                                eth_dst=d)
                            LOG.warn(
                                "\tConfigure switch %d for destination %d" %
                                (dpid, d))
                            LOG.warn(
                                "\t\tCreate fast failover group 0x%x/%d:" %
                                (group_id, group_id))
                            buckets = []
                            LOG.warn(
                                "\t\t\tAdding primary bucket to switch %d over port %d"
                                % (nexthop, port))
                            buckets.append(
                                parser.OFPBucket(
                                    watch_port=port,
                                    actions=[parser.OFPActionOutput(port)]))

                            if self.node_disjoint == False or self.edge_then_node_disjoint == True:
                                failure_id = s * (2**6) + nexthop

                                if (s,
                                    (s, nexthop)) in self.fw and d in self.fw[(
                                        s, (s, nexthop))] and self.fw[(s, (
                                            s, nexthop))][d] is not None:
                                    _nexthop = self.fw[(s, (s, nexthop))][d]
                                    _port = self.G.edge[s][_nexthop]['port']
                                    LOG.warn(
                                        "\t\t\tAdding secondary edge-disjoint bucket, setting VLAN = 0x%x, output to switch %d over port %d"
                                        % (failure_id, _nexthop, _port))
                                    buckets.append(
                                        parser.OFPBucket(
                                            watch_port=_port,
                                            actions=[
                                                parser.OFPActionSetField(
                                                    vlan_vid=ofp.OFPVID_PRESENT
                                                    | failure_id),
                                                parser.OFPActionOutput(_port)
                                            ]))

                                elif (s, nexthop) in self.fw and d in self.fw[(
                                        s, nexthop)] and self.fw[(
                                            s, nexthop)][d] is not None:
                                    _nexthop = self.fw[(s, nexthop)][d]
                                    _port = self.G.edge[s][_nexthop]['port']
                                    LOG.warn(
                                        "\t\t\tAdding secondary edge-disjoint bucket, setting VLAN = 0x%x, output to switch %d over port %d"
                                        % (failure_id, _nexthop, _port))
                                    buckets.append(
                                        parser.OFPBucket(
                                            watch_port=_port,
                                            actions=[
                                                parser.OFPActionSetField(
                                                    vlan_vid=ofp.OFPVID_PRESENT
                                                    | failure_id),
                                                parser.OFPActionOutput(_port)
                                            ]))

                            else:
                                failure_id = nexthop

                                if (s, nexthop) in self.fw and d in self.fw[(
                                        s, nexthop)] and self.fw[(
                                            s, nexthop)][d] is not None:
                                    _nexthop = self.fw[(s, nexthop)][d]
                                    _port = self.G.edge[s][_nexthop]['port']
                                    LOG.warn(
                                        "\t\t\tAdding secondary node-disjoint bucket, setting VLAN = 0x%x, output to switch %d over port %d"
                                        % (failure_id, _nexthop, _port))
                                    buckets.append(
                                        parser.OFPBucket(
                                            watch_port=_port,
                                            actions=[
                                                parser.OFPActionSetField(
                                                    vlan_vid=ofp.OFPVID_PRESENT
                                                    | failure_id),
                                                parser.OFPActionOutput(_port)
                                            ]))

                            req = parser.OFPGroupMod(datapath=dp,
                                                     type_=ofp.OFPGT_FF,
                                                     group_id=group_id,
                                                     buckets=buckets)
                            LOG.debug(req)
                            dp.send_msg(req)

                            LOG.warn("\t\tAdd forward to group %d" %
                                     (group_id))
                            actions = [parser.OFPActionGroup(group_id)]
                            inst = [
                                parser.OFPInstructionActions(
                                    ofp.OFPIT_APPLY_ACTIONS, actions)
                            ]
                            prio = ofp.OFP_DEFAULT_PRIORITY
                            req = parser.OFPFlowMod(datapath=dp,
                                                    match=match,
                                                    instructions=inst,
                                                    priority=prio)
                            LOG.debug(req)
                            dp.send_msg(req)

                            #LOG.warn("\t\tAdd default forwarding rule to switch %d over port %d"%(nexthop, port))
                            #match = parser.OFPMatch(vlan_vid=(ofp.OFPVID_PRESENT, ofp.OFPVID_PRESENT), eth_dst = d)
                            #actions=[parser.OFPActionOutput(port)]
                            #inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
                            #req = parser.OFPFlowMod(datapath=dp, match=match, instructions = inst)
                            #LOG.debug(req)
                            #dp.send_msg(req)

                        else:
                            s, v = _s

                            port = self.G.edge[s][nexthop]['port']
                            dpid = s
                            dp = self.G.node[dpid]['switch'].dp
                            ofp = dp.ofproto
                            parser = dp.ofproto_parser

                            if v in self.G.node:
                                LOG.warn(
                                    "\tConfigure switch %d for destination %d with node-failure %d"
                                    % (dpid, d, v))
                                LOG.warn(
                                    "\t\tCreate forward to switch %d on port %d"
                                    % (nexthop, port))
                                failure_id = v
                                match = parser.OFPMatch(
                                    vlan_vid=(ofp.OFPVID_PRESENT | failure_id,
                                              ofp.OFPVID_PRESENT | 2**6 - 1),
                                    eth_dst=d)
                                actions = [parser.OFPActionOutput(port)]
                                inst = [
                                    parser.OFPInstructionActions(
                                        ofp.OFPIT_APPLY_ACTIONS, actions)
                                ]
                                prio = ofp.OFP_DEFAULT_PRIORITY + 1
                                req = parser.OFPFlowMod(datapath=dp,
                                                        match=match,
                                                        instructions=inst,
                                                        priority=prio)
                                LOG.debug(req)
                                dp.send_msg(req)

                            else:
                                u, v = v
                                if s != u:
                                    LOG.warn(
                                        "\tConfigure switch %d for destination %d with edge-failure %d-%d"
                                        % (dpid, d, u, v))
                                    failure_id = u * (2**6) + v
                                    match = parser.OFPMatch(
                                        vlan_vid=(ofp.OFPVID_PRESENT
                                                  | failure_id),
                                        eth_dst=d)

                                    if self.edge_then_node_disjoint == True and v == nexthop and d in self.fw[
                                        (s,
                                         v)] and self.fw[(s,
                                                          v)][d] is not None:
                                        group_id = failure_id * 2**8 + d
                                        LOG.warn(
                                            "\t\tCreate fast failover group 0x%x/%d:"
                                            % (group_id, group_id))
                                        buckets = []
                                        LOG.warn(
                                            "\t\t\tAdding primary bucket to switch %d over port %d"
                                            % (nexthop, port))
                                        buckets.append(
                                            parser.OFPBucket(
                                                watch_port=port,
                                                actions=[
                                                    parser.OFPActionOutput(
                                                        port)
                                                ]))

                                        _nexthop = self.fw[(s, v)][d]
                                        _port = self.G.edge[s][_nexthop][
                                            'port']
                                        _failure_id = v
                                        LOG.warn(
                                            "\t\t\tAdding secondary bucket, setting VLAN = 0x%x, output to switch %d over port %d"
                                            % (_failure_id, _nexthop, _port))
                                        buckets.append(
                                            parser.OFPBucket(
                                                watch_port=_port,
                                                actions=[
                                                    parser.OFPActionSetField(
                                                        vlan_vid=ofp.
                                                        OFPVID_PRESENT
                                                        | _failure_id),
                                                    parser.OFPActionOutput(
                                                        _port)
                                                ]))

                                        req = parser.OFPGroupMod(
                                            datapath=dp,
                                            type_=ofp.OFPGT_FF,
                                            group_id=group_id,
                                            buckets=buckets)
                                        LOG.debug(req)
                                        dp.send_msg(req)

                                        LOG.warn(
                                            "\t\tAdd forward to group %d" %
                                            (group_id))
                                        actions = [
                                            parser.OFPActionGroup(group_id)
                                        ]
                                        inst = [
                                            parser.OFPInstructionActions(
                                                ofp.OFPIT_APPLY_ACTIONS,
                                                actions)
                                        ]
                                        prio = ofp.OFP_DEFAULT_PRIORITY + 2
                                        req = parser.OFPFlowMod(
                                            datapath=dp,
                                            match=match,
                                            instructions=inst,
                                            priority=prio)
                                        LOG.debug(req)
                                        dp.send_msg(req)
                                    else:
                                        LOG.warn(
                                            "\t\tAdd forward to switch %d on port %d:"
                                            % (nexthop, port))
                                        actions = [
                                            parser.OFPActionOutput(port)
                                        ]
                                        inst = [
                                            parser.OFPInstructionActions(
                                                ofp.OFPIT_APPLY_ACTIONS,
                                                actions)
                                        ]
                                        prio = ofp.OFP_DEFAULT_PRIORITY + 2
                                        req = parser.OFPFlowMod(
                                            datapath=dp,
                                            match=match,
                                            instructions=inst,
                                            priority=prio)
                                        LOG.debug(req)
                                        dp.send_msg(req)

                self.forwarding_update = datetime.now()
                LOG.warn("_calc_ForwardingMatrix(): Took %s" %
                         (self.forwarding_update - forwarding_update_start))

            hub.sleep(1)
Example #35
 def _monitor(self):
     while True:
         for dp in self.datapaths.values():
             self._request_stats(dp)
         hub.sleep(SimpleMonitor.QUERY_INTERVAL)
Example #36
 def testFlowSnapshotMatch(self):
     fake_inout_setup(self.inout_controller)
     hub.sleep(3)
     assert_bridge_snapshot_match(self, self.BRIDGE, self.service_manager)
Example #37
File: faucet.py Project: subhav8/faucet
 def gateway_resolve_request(self):
     while True:
         self.send_event('Faucet', EventFaucetResolveGateways())
         hub.sleep(2)
Example #38
 def _tdiscovery(self):
     global TOPOLOGY_DISCOVERED
     #while True:
     hub.sleep(DISCOVERY_INERVAL)
     self.get_topology_data()
     TOPOLOGY_DISCOVERED = 1
Example #39
    def process_packets(self):
        while True:
            if event_queue.empty():
                hub.sleep(1)
                continue
            else:
                event_item = event_queue.get()
                msg = event_item.msg
                timestamp = event_item.timestamp
                ts = datetime.datetime.fromtimestamp(timestamp).strftime(
                    '%Y-%m-%d %H:%M:%S')
                # print(ts)
                features = None

                pkt = packet.Packet(msg.data)
                ip_packet = pkt.get_protocol(ipv4.ipv4)
                udp_seg = pkt.get_protocol(udp.udp)
                tcp_seg = pkt.get_protocol(tcp.tcp)

                if ip_packet:
                    src_ip = ip_packet.src
                    dst_ip = ip_packet.dst

                    if udp_seg:
                        src_port = str(udp_seg.src_port)
                        dst_port = str(udp_seg.dst_port)
                        # Hit the node endpoint for UDP traffic
                        url = 'http://229c8b7b.ngrok.io/slave01/api'
                        features = self.extract_udp(ip_packet, udp_seg,
                                                    timestamp)
                        # print("UDP {}".format(len(features)))

                        self.results['timestamp'] = ts  # timestamp
                        self.results['src_ip'] = src_ip
                        self.results['src_port'] = src_port
                        self.results['dst_ip'] = dst_ip
                        self.results['dst_port'] = dst_port
                        self.results['node'] = "slave01"
                        self.results['service'] = features[1]
                        self.results['result'] = self.extract_features(
                            features, url)
                        # print("Done UDP")
                        self.producer.send('test',
                                           self.results).get(timeout=30)
                    elif tcp_seg:
                        src_port = str(tcp_seg.src_port)
                        dst_port = str(tcp_seg.dst_port)
                        # Hit the node endpoint for TCP traffic
                        url = 'http://229c8b7b.ngrok.io/slave02/api'
                        features = self.extract_tcp(ip_packet, tcp_seg,
                                                    timestamp)
                        # print("TCP {}".format(len(features)))

                        self.results['timestamp'] = ts  # timestamp
                        self.results['src_ip'] = src_ip
                        self.results['src_port'] = src_port
                        self.results['dst_ip'] = dst_ip
                        self.results['dst_port'] = dst_port
                        self.results['node'] = "slave02"
                        self.results['service'] = features[1]
                        self.results['result'] = self.extract_features(
                            features, url)
                        # print("Done UDP")
                        self.producer.send('test',
                                           self.results).get(timeout=30)
Example #40
 def _monitor(self):
     while True:
         for dp in self.datapaths.values():
             self._request_flow_stats(dp)
         hub.sleep(self.QUERY_DURATION)
Example #41
    def _monitor(self):

        while True:
            for dp in self.datapaths.values():
                self._request_stats(dp)
            hub.sleep(5)
Example #42
 def _run(self):
     while True:
         self._poll_subscriber_list()
         hub.sleep(self.config.poll_interval)
Example #43
    def setUpNetworkAndController(self,
                                  vlan: str = "",
                                  non_nat_arp_egress_port: str = None,
                                  gw_mac_addr="ff:ff:ff:ff:ff:ff"):
        """
        Starts the thread which launches ryu apps

        Create a testing bridge, add a port, setup the port interfaces. Then
        launch the ryu apps for testing pipelined. Gets the references
        to apps launched by using futures.
        """
        global gw_info_map
        gw_info_map.clear()
        hub.sleep(2)

        cls = self.__class__
        super(InOutNonNatTest, cls).setUpClass()
        inout.get_mobilityd_gw_info = mocked_get_mobilityd_gw_info
        inout.set_mobilityd_gw_info = mocked_set_mobilityd_gw_info

        warnings.simplefilter('ignore')
        cls.setup_uplink_br()

        if vlan != "":
            cls._setup_vlan_network(vlan)

        cls.service_manager = create_service_manager([])

        inout_controller_reference = Future()
        testing_controller_reference = Future()

        if non_nat_arp_egress_port is None:
            non_nat_arp_egress_port = cls.DHCP_PORT

        patch_up_port_no = BridgeTools.get_ofport('patch-up')
        test_setup = TestSetup(apps=[
            PipelinedController.InOut, PipelinedController.Testing,
            PipelinedController.StartupFlows
        ],
                               references={
                                   PipelinedController.InOut:
                                   inout_controller_reference,
                                   PipelinedController.Testing:
                                   testing_controller_reference,
                                   PipelinedController.StartupFlows: Future(),
                               },
                               config={
                                   'setup_type': 'LTE',
                                   'bridge_name': cls.BRIDGE,
                                   'bridge_ip_address': cls.BRIDGE_IP,
                                   'ovs_gtp_port_number': 32768,
                                   'clean_restart': True,
                                   'enable_nat': False,
                                   'non_nat_gw_probe_frequency': 0.5,
                                   'non_nat_arp_egress_port':
                                   non_nat_arp_egress_port,
                                   'uplink_port': patch_up_port_no,
                                   'uplink_gw_mac': gw_mac_addr,
                               },
                               mconfig=None,
                               loop=None,
                               service_manager=cls.service_manager,
                               integ_test=False)

        BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
        subprocess.Popen(["ifconfig", cls.UPLINK_BR, "192.168.128.41"]).wait()
        cls.thread = start_ryu_app_thread(test_setup)
        cls.inout_controller = inout_controller_reference.result()

        cls.testing_controller = testing_controller_reference.result()
Example #44
 def _monitor(self):
     while True:
         for dp in self.datapaths.values():
             self._request_stats(dp)
         self.redistribute_flows()
         hub.sleep(10)
Example #45
File: faucet.py Project: subhav8/faucet
 def host_expire_request(self):
     while True:
         self.send_event('Faucet', EventFaucetHostExpire())
         hub.sleep(5)
Example #46
 def _start(self):
     sleep(4)
     self.recompile()
Example #47
 def _periodic_event_loop(self):
     while True:
         hub.sleep(10)
         ev = TestEvent('DROP EVENT')
         self.logger.info('*** Send event: event.msg = %s', ev.msg)
         self.send_event_to_observers(ev)
Example #48
 def gateway_resolve_request(self):
     """Trigger gateway/nexthop re/resolution."""
     while True:
         self.send_event('Faucet', EventFaucetResolveGateways())
         hub.sleep(2)
Example #49
 def _keepAlive(self):
     while True:
         if self.superExist:
             self._send_keep_alive_message()
             self.logger.info("send keep alive")
         hub.sleep(5)
Example #50
 def host_expire_request(self):
     """Trigger expiration of host state in controller."""
     while True:
         self.send_event('Faucet', EventFaucetHostExpire())
         hub.sleep(5)
Example #51
File: myspraft.py Project: as74150/tmp1
    def _topo_local_sync(self, interval):
        while self.is_active:
            switch_list = get_switch(self.topology_api_app, None)

            switches = [switch.dp.id for switch in switch_list]
            strTopoMessage = ''
            #print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
            if len(switches) > 0:
                for sw in switches:
                    tmpStr = str(sw) + '#'
                    strTopoMessage = strTopoMessage + tmpStr
                    #print sw
            #print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++'

            strTopoMessage = strTopoMessage.rstrip('#')
            if strTopoMessage is not '':
                strTopoMessage = strTopoMessage + '$'

            links_list = get_link(self.topology_api_app, None)
            #print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
            tmpStr = ''
            if len(links_list) > 0:
                for link in links_list:
                    linkStr = str(link.src.dpid) + ',' + str(
                        link.dst.dpid) + ',' + str(
                            link.src.port_no) + ',' + str(
                                link.dst.port_no) + ',' + str(0) + ')'
                    tmpStr = tmpStr + linkStr
                #links = [(link.src.dpid, link.dst.dpid, link.src.port_no) for link in links_list]
                #links = [(link.dst.dpid, link.src.dpid, link.dst.port_no) for link in links_list]
                tmpStr = tmpStr.rstrip(')')
                tmpStr = tmpStr + '$'
                strTopoMessage = strTopoMessage + tmpStr
                #print tmpStr
                #print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
            tmpStr = ''
            hosts_list = get_host(self.topology_api_app, None)
            for host in hosts_list:
                #print host
                #print ''
                #print host.port.hw_addr
                for host_i in hosts_list:
                    if host != host_i:
                        if abs(host.port.dpid) == abs(
                                host_i.port.dpid
                        ) and host.port.port_no == host_i.port.port_no:
                            host_i.port.dpid = -abs(host_i.port.dpid)
                            #hosts_list.remove(host_i)
                            if host.port.dpid > 0:
                                host.port.dpid = -abs(host.port.dpid)

            for host in hosts_list:
                #print host.port.dpid
                #print host.mac
                #print ' || port' + str(host.port.port_no)
                if host.port.dpid > 0:
                    #print 'tesult host'
                    #print host
                    hostStr = str(host.port.dpid) + ',' + str(
                        host.mac) + ',' + str(host.port.port_no) + '#'
                    tmpStr = tmpStr + hostStr
            tmpStr = tmpStr.rstrip('#')
            strTopoMessage = strTopoMessage + tmpStr
            #	        self.net.add_node(host.mac)
            #                self.net.add_edge(host.port.dpid, host.mac, port=host.port.port_no, weight=0)
            #                self.net.add_edge(host.mac, host.port.dpid, weight=0)
            #self.printG()
            #This is Controller1

            # get possible interLinks message
            try:
                f_rt_switch = open("/home/openlab/openlab/ryu/interLinks.log",
                                   "r+")
                interLinksStr = str(f_rt_switch.read())
                f_rt_switch.truncate()
            finally:
                f_rt_switch.close()
            if strTopoMessage is not '':
                strTopoMessage = strTopoMessage + '$' + interLinksStr

            strTopoMessage = '1@' + strTopoMessage
            #self.printG()
            try:
                f_rf_switch = open("/home/openlab/openlab/ryu/topoMessage.log",
                                   "w+")
                f_rf_switch.write(str(strTopoMessage))
            finally:
                f_rf_switch.close()
            #self.printG()
            hub.sleep(interval)
Example #52
    def _state_change_handler(self, ev):
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
                self.logger.debug('register datapath: %016x', datapath.id)
                self.datapaths[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                self.logger.debug('unregister datapath: %016x', datapath.id)
                del self.datapaths[datapath.id]

    def _monitor(self):
        while True:
            for dp in self.datapaths.values():
                self._request_stats(dp)
            hub.sleep(10)

    def _request_stats(self, datapath):
        self.logger.debug('send stats request: %016x', datapath.id)
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        req = parser.OFPFlowStatsRequest(datapath)
        datapath.send_msg(req)

        req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        body = ev.msg.body
Example #53
    def test_multiple_subscribers(self):
        """
        Test credit tracking with multiple rules and 32 subscribers, each using
        up their quota and reporting to the OCS
        """
        subs = [
            SubContextConfig(
                'IMSI0010100000888{}'.format(i),
                '192.168.128.{}'.format(i),
                4,
            ) for i in range(32)
        ]
        quota = 1024  # bytes

        # create some rules
        rule1 = create_uplink_rule("rule1", 2, '46.10.0.1')
        rule2 = create_uplink_rule("rule2",
                                   0,
                                   '47.10.0.1',
                                   tracking=PolicyRule.NO_TRACKING)
        rule3 = create_uplink_rule("rule3", 3, '49.10.0.1')
        self.test_util.static_rules["rule1"] = rule1
        self.test_util.static_rules["rule2"] = rule2
        hub.sleep(2)  # wait for policies

        # set up mocks
        self.test_util.controller.mock_create_session = Mock(
            return_value=session_manager_pb2.CreateSessionResponse(
                credits=[
                    create_update_response("", 2, quota),
                    create_update_response("", 3, quota),
                ],
                static_rules=[
                    session_manager_pb2.StaticRuleInstall(rule_id="rule1"),
                    session_manager_pb2.StaticRuleInstall(rule_id="rule2"),
                ],
                dynamic_rules=[
                    session_manager_pb2.DynamicRuleInstall(policy_rule=rule3)
                ],
            ), )
        self.test_util.controller.mock_terminate_session = Mock(
            return_value=session_manager_pb2.SessionTerminateResponse(), )
        update_complete = hub.Queue()
        self.test_util.controller.mock_update_session = Mock(
            side_effect=get_standard_update_response(update_complete,
                                                     None,
                                                     quota,
                                                     is_final=True), )

        # initiate sessions
        for sub in subs:
            self.test_util.sessiond.CreateSession(
                session_manager_pb2.LocalCreateSessionRequest(
                    sid=SubscriberID(id=sub.imsi),
                    ue_ipv4=sub.ip,
                ), )
        self.assertEqual(
            self.test_util.controller.mock_create_session.call_count,
            len(subs))

        # send packets towards all 3 rules
        flows = [rule.flow_list[0] for rule in [rule1, rule2, rule3]]
        packets = []
        for sub in subs:
            packets.extend(get_packets_for_flows(sub, flows))
        packet_count = int(quota / len(packets[0])) + 1
        self.test_util.thread.run_in_greenthread(
            self.test_util.get_packet_sender(subs, packets, packet_count), )

        # wait for responses for keys 2 and 3 (key 1 is not tracked)
        expected_keys = {(sub.imsi, key) for sub in subs for key in [2, 3]}
        for _ in range(len(expected_keys)):
            update = get_from_queue(update_complete)
            self.assertIsNotNone(update)
            imsiKey = (update.sid, update.usage.charging_key)
            self.assertTrue(imsiKey in expected_keys)
            expected_keys.remove(imsiKey)

        for sub in subs:
            self.test_util.sessiond.EndSession(SubscriberID(id=sub.imsi))
        self.assertEqual(
            self.test_util.controller.mock_terminate_session.call_count,
            len(subs))
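
get_from_queue is used throughout these tests but not shown in this excerpt; under the assumption that it simply waits on the hub.Queue with a timeout, a sketch could look like this (the function body and timeout value are illustrative, not the actual helper):

from ryu.lib import hub

def get_from_queue(q, timeout=10):
    # Block until an update arrives on the queue, or return None on timeout.
    try:
        return q.get(block=True, timeout=timeout)
    except hub.QueueEmpty:
        return None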
예제 #54
0
    def _topology_thread(self):
        while True:
            all_switch = get_all_switch(self)
            Set_all_switch(all_switch)
            all_link = get_all_link(self)
            Set_all_link(all_link)
            all_host = get_all_host(self)
            Set_all_host(all_host)
            print 'all_switch = '
            print len(Get_all_switch())
            print 'all_link = '
            print len(Get_all_link())
            print 'all_host = '
            print len(Get_all_host())
            for a in Get_all_host():
                print a.ipv4
            hub.sleep(1)
            if len(Get_all_switch()) == 6 and len(Get_all_host()) == 9:
                c = 0
                for i in range(0, len(Get_all_switch())):
                    get_TopoNumberTo().append(['switch', Get_all_switch()[i]])
                for i in range(0, len(Get_all_host())):
                    get_TopoNumberTo().append(['host', Get_all_host()[i]])
                # len(get_TopoNumberTo()) = num(switch) + num(host)

                # for x in get_TopoNumberTo():
                #    print (x,':',str(x))
                weight = []
                for i in range(0, len(get_TopoNumberTo())):
                    weight.append([999999999] * len(get_TopoNumberTo()))
                for i in range(0, len(weight)):
                    for j in range(0, len(weight[i])):
                        if i == j:
                            weight[i][j] = 0
                for link in Get_all_link():
                    indexA = 0
                    indexB = 0
                    for i in get_TopoNumberTo():
                        if i[0] == 'switch' and i[1].dp.id == link.src.dpid:
                            indexA = get_TopoNumberTo().index(i)
                        if i[0] == 'switch' and i[1].dp.id == link.dst.dpid:
                            indexB = get_TopoNumberTo().index(i)
                    weight[indexA][indexB] = 1
                    weight[indexB][indexA] = 1
                for host in Get_all_host():
                    indexA = 0
                    indexB = 0
                    for i in get_TopoNumberTo():
                        if i[0] == 'host' and i[1] == host:
                            indexA = get_TopoNumberTo().index(i)
                        if i[0] == 'switch' and i[1].dp.id == host.port.dpid:
                            indexB = get_TopoNumberTo().index(i)
                    # Link(A,B) = 1
                    weight[indexA][indexB] = 1
                    weight[indexB][indexA] = 1
                print weight
                # forwarding matrix to forwarding Table
                forwardingMatrix, distance = MakeForwardingTable(weight)
                print forwardingMatrix
                for i in range(0, len(get_TopoNumberTo())):
                    if get_TopoNumberTo()[i][0] == 'switch':
                        # get switch dpid
                        switchdp = get_TopoNumberTo()[i][1].dp.id
                        get_forwardingTable()[switchdp] = {}
                        for j in range(0, len(forwardingMatrix[i])):
                            if get_TopoNumberTo()[j][0] == 'host':
                                dsthost = get_TopoNumberTo()[j][1].mac
                                Pport = -1
                                if get_TopoNumberTo()[forwardingMatrix[i]
                                                      [j]][0] == 'switch':
                                    for link in Get_all_link():
                                        if link.src.dpid == switchdp and link.dst.dpid == get_TopoNumberTo(
                                        )[forwardingMatrix[i][j]][1].dp.id:
                                            Pport = link.src.port_no
                                if get_TopoNumberTo()[forwardingMatrix[i]
                                                      [j]][0] == 'host':
                                    Pport = get_TopoNumberTo(
                                    )[j][1].port.port_no
                                if Pport == -1:
                                    print 'host not found'
                                    return
                                else:
                                    get_forwardingTable(
                                    )[switchdp][dsthost] = Pport

                print get_forwardingTable()
                for i in range(0, len(distance)):
                    if get_TopoNumberTo()[i][0] == 'host':
                        get_distanceTable()[get_TopoNumberTo()[i][1].mac] = {}
                        for j in range(0, len(distance[i])):
                            if get_TopoNumberTo()[j][0] == 'host':
                                get_distanceTable()[get_TopoNumberTo(
                                )[i][1].mac][get_TopoNumberTo()[j]
                                             [1].mac] = distance[i][j]
                print get_distanceTable()

                # arpMatrix to ArpTable
                check = [0] * len(get_TopoNumberTo())
                check[0] = 1
                arpMatrix = []
                for i in range(0, len(get_TopoNumberTo())):
                    arpMatrix.append([0] * len(get_TopoNumberTo()))
                SPTqueue = []
                SPTqueue.append(0)
                while len(SPTqueue) != 0:
                    i = SPTqueue.pop(0)
                    for j in range(0, len(get_TopoNumberTo())):
                        if weight[i][j] == 1 and check[j] == 0:
                            arpMatrix[i][j] = 1
                            arpMatrix[j][i] = 1
                            check[j] = 1
                            SPTqueue.append(j)
                '''for i in range(0,len(get_TopoNumberTo())):
                    for j in range(0,len(get_TopoNumberTo())):
                        if weight[i][j]==1 and check[j] == 0:
                            arpMatrix[i][j]=1
                            arpMatrix[j][i]=1
                            check[j]=1'''

                print "get_TopoNumberTo() = "
                # UseLess
                # for i in get_TopoNumberTo():
                #    print 'get_TopoNumberTo()['+str(i)+'] = '+str(get_TopoNumberTo()[0])
                print "arpMatrix = "
                print arpMatrix
                for i in range(0, len(get_TopoNumberTo())):
                    if get_TopoNumberTo()[i][0] == 'switch':
                        switchdp = get_TopoNumberTo()[i][1].dp.id
                        Get_ArpTable()[switchdp] = []
                        for j in range(0, len(arpMatrix[i])):
                            if arpMatrix[i][j] == 1:
                                Pport = -1
                                if get_TopoNumberTo()[j][0] == 'switch':
                                    for link in Get_all_link():
                                        if link.src.dpid == switchdp and link.dst.dpid == get_TopoNumberTo(
                                        )[j][1].dp.id:
                                            Pport = link.src.port_no
                                if get_TopoNumberTo()[j][0] == 'host':
                                    Pport = get_TopoNumberTo(
                                    )[j][1].port.port_no
                                if Pport == -1:
                                    print 'Pport not found'
                                    return
                                else:
                                    Get_ArpTable()[switchdp].append(Pport)
                print "ARP Table = "
                print Get_ArpTable()
                print "get_toponumberto = "
                print get_TopoNumberTo()
                Set_ready(True)
                break
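
MakeForwardingTable is called above but not defined in this excerpt; a plausible sketch (an assumption, using Floyd-Warshall over the same 999999999 "no edge" convention) that returns a next-hop matrix plus the distance matrix:

INF = 999999999

def MakeForwardingTable(weight):
    # weight: adjacency matrix with 0 on the diagonal, 1 for links, INF otherwise
    n = len(weight)
    dist = [row[:] for row in weight]
    # next_hop[i][j] = index of the node i should forward to in order to reach j
    next_hop = [[j if dist[i][j] < INF else -1 for j in range(n)]
                for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    next_hop[i][j] = next_hop[i][k]
    return next_hop, dist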
예제 #55
0
    def _aliaser_boi(self):

        while True:
            '''
                NOTE: THE DEFAULT CONFIG FILE IS PLACED HERE
                      WHICH ENABLES EDITING DURING RUN TIME
            '''
            print(str(self.datapaths))
            for dp_id, rules in default.default_list.iteritems():
                if dp_id not in self.datapaths:
                    continue
                for rule_set in rules:
                    dp = self.datapaths[dp_id]
                    ofproto = dp.ofproto
                    parser = dp.ofproto_parser
                    act_set = parser.OFPActionSetField
                    act_out = parser.OFPActionOutput
                    '''
                        NOTE: The eth_type and ip_proto fields
                              have constants imported from
                              ether_types and in_proto.
                    '''

                    try:
                        # OUTGOING PACKET FLOWS (SRC PERCEPTION)
                        match = parser.OFPMatch(
                            eth_type=ETH_TYPE_IP,
                            ip_proto=IPPROTO_TCP,
                            ipv4_src=rule_set["ipv4_addr_src"],
                            ipv4_dst=rule_set["ipv4_addr_dst"],
                        )

                        actions = [
                            act_set(eth_dst=rule_set["eth_addr_fake"]),
                            act_set(ipv4_dst=rule_set["ipv4_addr_fake"]),
                            act_out(self.mac_to_port[dp_id][
                                rule_set["eth_addr_fake"]])
                        ]
                        # The second param passed to add_flow (priority)
                        # is an arbitrary value (for now)
                        super(Rerouter, self).add_flow(dp, 15, match, actions)

                        # INCOMING PACKET FLOWS (SRC PERCEPTION)
                        match = parser.OFPMatch(
                            eth_type=ETH_TYPE_IP,
                            ip_proto=IPPROTO_TCP,
                            ipv4_src=rule_set["ipv4_addr_fake"],
                            ipv4_dst=rule_set["ipv4_addr_src"],
                        )

                        actions = [
                            act_set(eth_src=rule_set["eth_addr_dst"]),
                            act_set(ipv4_src=rule_set["ipv4_addr_dst"]),
                            act_out(self.mac_to_port[dp_id][
                                rule_set["eth_addr_src"]])
                        ]
                        # The second param passed to add_flow
                        # is an arbitrary value (for now)
                        super(Rerouter, self).add_flow(dp, 15, match, actions)

                    except KeyError:
                        continue

            hub.sleep(10)
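
The rule_set dictionaries above come from an external default config module; a hypothetical default.py illustrating the structure this loop expects (keys taken from the code, values are placeholders) might be:

# default.py -- runtime-editable aliasing rules, keyed by datapath id
default_list = {
    1: [
        {
            "ipv4_addr_src": "10.0.0.1",
            "ipv4_addr_dst": "10.0.0.2",
            "ipv4_addr_fake": "10.0.0.99",
            "eth_addr_src": "00:00:00:00:00:01",
            "eth_addr_dst": "00:00:00:00:00:02",
            "eth_addr_fake": "00:00:00:00:00:99",
        },
    ],
}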
예제 #56
0
    def test_out_of_credit(self):
        """
        Initiate subscriber, return 1 static policy, send traffic to match the
        policy, verify update is sent, return final credits, use up final
        credits, ensure that no traffic can be sent
        """
        sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', 4)
        quota = 1024  # bytes

        self.test_util.controller.mock_create_session = Mock(
            return_value=session_manager_pb2.CreateSessionResponse(
                credits=[
                    session_manager_pb2.CreditUpdateResponse(
                        success=True,
                        sid=sub1.imsi,
                        charging_key=1,
                        credit=session_manager_pb2.ChargingCredit(
                            granted_units=session_manager_pb2.GrantedUnits(
                                total=session_manager_pb2.CreditUnit(
                                    is_valid=True,
                                    volume=quota,
                                ), ), ),
                    )
                ],
                static_rules=[
                    session_manager_pb2.StaticRuleInstall(
                        rule_id="simple_match")
                ],
                dynamic_rules=[],
                usage_monitors=[],
            ), )

        self.test_util.controller.mock_terminate_session = Mock(
            return_value=session_manager_pb2.SessionTerminateResponse(), )

        update_complete = hub.Queue()
        self.test_util.controller.mock_update_session = Mock(
            side_effect=get_standard_update_response(update_complete,
                                                     None,
                                                     quota,
                                                     is_final=True), )

        self.test_util.sessiond.CreateSession(
            session_manager_pb2.LocalCreateSessionRequest(
                sid=SubscriberID(id=sub1.imsi),
                ue_ipv4=sub1.ip,
            ), )
        self.assertEqual(
            self.test_util.controller.mock_create_session.call_count, 1)

        packets = get_packets_for_flows(
            sub1, self.test_util.static_rules["simple_match"].flow_list)
        packet_count = int(quota / len(packets[0])) + 1
        send_packets = self.test_util.get_packet_sender([sub1], packets,
                                                        packet_count)

        self.test_util.thread.run_in_greenthread(send_packets)
        self.assertIsNotNone(get_from_queue(update_complete))
        self.assertEqual(
            self.test_util.controller.mock_update_session.call_count, 1)

        # use up last credits
        self.test_util.thread.run_in_greenthread(send_packets)
        hub.sleep(3)  # wait for sessiond to terminate rule after update

        pkt_diff = self.test_util.thread.run_in_greenthread(send_packets)
        self.assertEqual(pkt_diff, 0)

        self.test_util.proxy_responder.ChargingReAuth(
            session_manager_pb2.ChargingReAuthRequest(
                charging_key=1,
                sid=sub1.imsi,
            ), )
        get_from_queue(update_complete)
        self.assertEqual(
            self.test_util.controller.mock_update_session.call_count, 2)
        # wait for 1 update to trigger credit request, another to trigger
        # rule activation
        # TODO Add future to track when flows are added/deleted
        hub.sleep(5)
        pkt_diff = self.test_util.thread.run_in_greenthread(send_packets)
        self.assertGreater(pkt_diff, 0)

        self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi))
        self.assertEqual(
            self.test_util.controller.mock_terminate_session.call_count, 1)
예제 #57
0
파일: Monitoring.py 프로젝트: jiakaiyu/tSDX
    def _handle_OSNR_monitoring_request(self, ev):
        # send OFPT_GET_OSNR_REQUEST to agent
        # set up a timer in south_timer
        self.logger.debug(
            'Monitoring module receives South_OSNRMonitoringRequestEvent')
        new_timer = Database.Timer()
        new_timer.traf_id = ev.traf_id
        new_timer.timer_type = TIMER_OSNR_MONITORING
        new_timer.end_time = time.time() + SOUTH_WAITING_TIME
        Database.Data.south_timer.append(new_timer)
        self.logger.debug('ev.traf_id = %d' % ev.traf_id)
        self.logger.debug('ev.route_type = %d' % ev.route_type)
        for this_lsp in Database.Data.lsp_list.lsp_list:
            if this_lsp.traf_id == ev.traf_id and this_lsp.route_type == ev.route_type:
                new_msgs = Database.LSP_msg_list()
                new_msgs.lsp_id = this_lsp.lsp_id
                new_msgs.route_type = this_lsp.route_type
        new_timer.lsp_msg_list.append(new_msgs)
        Database.Data.message_id += 1
        new_msgs.msgs[0] = Database.Data.message_id
        Database.Data.message_id += 1
        new_msgs.msgs[1] = Database.Data.message_id
        # for recording execution time
        if Database.Data.south_osnr_monitor_time == 0:
            Database.Data.south_osnr_monitor_time = time.time()
        else:
            self.logger.critical('south_osnr_monitor_time error! \n')
        # for recording execution time end
        new_node = this_lsp.explicit_route.route[0]
        if new_node is not None:
            dpid = DPID
            datapath = Database.Data.ip2datapath[new_node.node_ip]
            msg_id = new_msgs.msgs[0]
            mod = datapath.ofproto_parser.OFPTGetOSNRRequest(
                datapath,
                datapath_id=dpid,
                message_id=msg_id,
                ITU_standards=ITU_C_50,
                node_id=Database.Data.phy_topo.get_node_id_by_ip(
                    new_node.node_ip),
                port_id=new_node.add_port_id,
                start_channel=this_lsp.occ_chnl[0],
                end_channel=this_lsp.occ_chnl[-1],
                experiment1=0,
                experiment2=0)
            datapath.send_msg(mod)
            self.logger.info(
                'an OSNR monitor request is sent by RYU. (Monitoring: _handle_OSNR_monitoring_request)'
            )
            self.logger.debug('msg_id = %d' % msg_id)
            self.logger.debug(
                'node_id = %d' %
                Database.Data.phy_topo.get_node_id_by_ip(new_node.node_ip))
            self.logger.debug('port_id = %d' % new_node.add_port_id)
            hub.sleep(0.05)
            #new_msgs.msgs.append(Database.Data.message_id)
        new_node = this_lsp.explicit_route.route[-1]
        if new_node is not None:
            dpid = DPID
            datapath = Database.Data.ip2datapath[new_node.node_ip]
            msg_id = new_msgs.msgs[1]
            mod = datapath.ofproto_parser.OFPTGetOSNRRequest(
                datapath,
                datapath_id=dpid,
                message_id=msg_id,
                ITU_standards=ITU_C_50,
                node_id=Database.Data.phy_topo.get_node_id_by_ip(
                    new_node.node_ip),
                port_id=new_node.drop_port_id,
                start_channel=this_lsp.occ_chnl[0],
                end_channel=this_lsp.occ_chnl[-1],
                experiment1=0,
                experiment2=0)
            datapath.send_msg(mod)
            self.logger.info(
                'an OSNR monitor request is sent by RYU. (Monitoring: _handle_OSNR_monitoring_request)'
            )
            self.logger.debug('msg_id = %d' % msg_id)
            self.logger.debug(
                'node_id = %d' %
                Database.Data.phy_topo.get_node_id_by_ip(new_node.node_ip))
            self.logger.debug('port_id = %d' % new_node.drop_port_id)
            #new_msgs.msgs.append(Database.Data.message_id)

        if (not new_msgs.msgs) and (new_msgs in new_timer.lsp_msg_list):
            new_timer.lsp_msg_list.remove(new_msgs)
        if (new_timer.lsp_msg_list == []) and (new_timer
                                               in Database.Data.south_timer):
            Database.Data.south_timer.remove(new_timer)
            self.logger.info(
                'No unprovisioned LSPs are found! (Monitoring: _handle_OSNR_monitoring_request)'
            )
        '''# for testing
예제 #58
0
    def _cyclic_ra(self):
        while True:
            self._send_ra()
            hub.sleep(ra_interval)
예제 #59
0
    def _monitor(self):
        while True:
            self.timex = str(time.time())
            for dp in self.datapaths.values():
                self._request_stats(dp)
            hub.sleep(5)
예제 #60
0
    def test_passthrough_rules(self):
        """
           Add UE MAC flows for two subscribers
        """
        imsi_1 = 'IMSI010000000088888'
        other_mac = '5e:cc:cc:b1:aa:aa'
        cli_ip = '1.1.1.1'
        server_ip = '151.42.41.122'

        # Add subscriber with UE MAC address
        self.ue_mac_controller.add_ue_mac_flow(imsi_1, self.UE_MAC_1)

        # Create a set of packets
        pkt_sender = ScapyPacketInjector(self.BRIDGE)

        # Only send downlink as the pkt_sender sends pkts from in_port=LOCAL
        downlink_packet1 = EtherPacketBuilder() \
            .set_ether_layer(self.UE_MAC_1, other_mac) \
            .build()
        dhcp_packet = DHCPPacketBuilder() \
            .set_ether_layer(self.UE_MAC_1, other_mac) \
            .set_ip_layer(server_ip, cli_ip) \
            .set_udp_layer(67, 68) \
            .set_bootp_layer(2, cli_ip, server_ip, other_mac) \
            .set_dhcp_layer([("message-type", "ack"), "end"]) \
            .build()
        dns_packet = UDPPacketBuilder() \
            .set_ether_layer(self.UE_MAC_1, other_mac) \
            .set_ip_layer('151.42.41.122', '1.1.1.1') \
            .set_udp_layer(53, 32795) \
            .build()
        arp_packet = ARPPacketBuilder() \
            .set_ether_layer(self.UE_MAC_1, other_mac) \
            .set_arp_layer('1.1.1.1') \
            .set_arp_hwdst(self.UE_MAC_1) \
            .set_arp_src(other_mac, '1.1.1.12') \
            .build()

        # Check if these flows were added (queries should return flows)
        flow_queries = [
            FlowQuery(self._tbl_num, self.testing_controller,
                      match=MagmaMatch(eth_dst=self.UE_MAC_1))
        ]

        # =========================== Verification ===========================
        # Verify 9 flows installed for ue_mac table (3 pkts matched)
        #        4 flows installed for inout (3 pkts matched)
        #        2 flows installed (2 pkts matches)
        flow_verifier = FlowVerifier(
            [
                FlowTest(FlowQuery(self._tbl_num,
                                   self.testing_controller), 4, 9),
                FlowTest(FlowQuery(self._ingress_tbl_num,
                                   self.testing_controller), 4, 2),
                FlowTest(FlowQuery(self._egress_tbl_num,
                                   self.testing_controller), 0, 2),
                FlowTest(flow_queries[0], 4, 4),
            ], lambda: wait_after_send(self.testing_controller))

        snapshot_verifier = SnapshotVerifier(self, self.BRIDGE,
                                             self.service_manager)

        with flow_verifier, snapshot_verifier:
            pkt_sender.send(dhcp_packet)
            pkt_sender.send(downlink_packet1)
            pkt_sender.send(dns_packet)
            hub.sleep(3)
            pkt_sender.send(arp_packet)

        flow_verifier.verify()