def main():
    """Run a rate-control experiment over a 2-host Mininet topology.

    For each offered input rate, a goben traffic pair and a control client are
    launched, then a sweep of control rates is applied to the shared tx_rate
    cell while throughput is recorded under ``out_dir``.
    """
    in_rates = [10e6, 100e6, 1000e6]  # offered input rates in bit/s
    ctrl_rates = []
    out_dir = "control_test"
    sleep_per_test = 10  # seconds
    setLogLevel("info")
    topo = TestTopo(2)
    net = Mininet(topo=topo, controller=None)
    net.start()
    net.ping()  # warm up ARP/flow state before measuring
    hosts = net.hosts
    src_host = hosts[0]
    dst_host = hosts[1]
    ctrl_iface = connect_controller(net, src_host)
    server_proc = launch_goben_server(dst_host)
    time.sleep(2)  # give the server time to start listening
    for in_rate in in_rates:
        ctrl_proc = launch_ctrl_client(src_host, in_rate)
        # tx_rate is a shared mutable cell read by the rate controller;
        # bw_proc is the bandwidth-monitoring process.
        tx_rate, bw_proc = init_rate_control(ctrl_iface, in_rate)
        time.sleep(0.5)
        ctrl_rates = generate_ctrl_rates(in_rate)
        client_proc = launch_goben_client(src_host, dst_host, in_rate)
        for ctrl_rate in ctrl_rates:
            log.info("#############################")
            # Apply the new control rate by mutating the shared cell.
            tx_rate[0] = ctrl_rate
            dc_utils.start_process("tc qdisc show dev h0-eth0", src_host)
            record_rate(in_rate, ctrl_rate, sleep_per_test, out_dir)
            log.info("#############################")
        # Tear down the per-input-rate processes before the next sweep.
        dc_utils.kill_processes([ctrl_proc, client_proc, bw_proc])
    dc_utils.kill_processes([server_proc])
    summarize(out_dir)
    net.stop()
def simple(): t=MyTopo(); #c=Controller(name="c0",command="python ./pox/pox.py") #net=Mininet(t,controller=bridge); net=Mininet(topo=t,controller=lambda name:RemoteController(name,ip='127.0.0.1')) #net.addController(name="c0",port=6633); #mininet.node.RemoteController(port=6633) net.start(); #print net.host; f=open("MacHost.txt","w"); for i in net.hosts: print "i= "; print i; print Node.MAC(i); f.write(str(i)+" "+str(Node.MAC(i))+"\n"); f.close(); z=0 f=open("/home/saumya/pox/output.txt","w") f.close() for i in net.hosts: for j in net.hosts: if(i!=j): time.sleep(10); net.ping([i,j]) z=z+1; parse("/home/saumya/pox/output.txt"); draw_graph();
def simpleTest():
    "Create and test a simple network"
    # Python 2 code (print statements). Builds a 4-pod fat tree under a
    # remote controller and pings continuously until interrupted.
    topo = FatTree(4)
    net = Mininet(topo, controller=RemoteController, link=TCLink)
    net.start()
    print "Dumping host connections"
    dumpNodeConnections(net.hosts)
    print "Testing network connectivity"
    #ping_all_cmd = "fping -t 10 -l -p 5000 " + " ".join([host.IP() for host in net.hosts])+" > /tmp/%s_logs.txt &"
    #for host in net.hosts:
    #    host.cmd(ping_all_cmd%host.name)
    #print(dir(host))
    # for host in net.hosts:
    #     term.makeTerm(host)
    # Ping loop runs until Ctrl-C, then drops into the CLI.
    try:
        while True:
            net.ping(timeout=5)
            time.sleep(6)
    except KeyboardInterrupt:
        pass
    CLI(net)
    net.stop()
def standalone():
    """Start the demo topology standalone, driven by command-line flags.

    Recognised argv tokens: ``info`` (verbose logging), ``of``/``lr``
    (switch class), ``noarp``, ``dump``, ``test``, ``cli``.
    """
    if "info" in argv:
        setLogLevel('info')
    # Pick the switch class from the command line; exactly one of the two
    # modes must be requested.
    if "of" in argv:
        switch_cls = StaticSwitch
    elif "lr" in argv:
        switch_cls = DRtr
    else:
        print("Supply either of (for OpenFlow) or lr (for a Linux Router)")
        exit(-1)
    net = Mininet(topo=DTopo(scls=switch_cls), autoSetMacs=True)
    net.start()
    sw, h1, h2 = net.get('s1', 'h1', 'n1')
    if "noarp" not in argv:
        makearpentries(sw, [h1, h2])
    #print(sw.cmd("netstat -tulpen"))
    if "dump" in argv:
        if "lr" in argv:
            # Linux-router mode: capture firewall, routing and MAC state.
            for label, cmd in (('iptables-save', 'iptables-save'),
                               ('ip-route', 'ip route'),
                               ('collectedmacs', 'collectmacs.sh')):
                dump(label, sw.cmd(cmd))
        elif "of" in argv:
            print("of/dump TODO")
    if "test" in argv:
        endpoints = [h1, h2]
        net.ping(endpoints)
        tcpreachtests(net, endpoints, ports=[80, 22])
    if "cli" in argv:
        CLI(net)
    net.stop()
def linearBandwidthTest(lengths):
    "Check bandwidth at various lengths along a switch chain."
    # lengths: iterable of chain lengths (hops) to measure.
    results = {}
    switchCount = max(lengths)
    hostCount = switchCount + 1
    switches = {'reference user': UserSwitch,
                'Open vSwitch kernel': OVSKernelSwitch}
    # UserSwitch is horribly slow with recent kernels.
    # We can reinstate it once its performance is fixed
    del switches['reference user']
    topo = LinearTestTopo(hostCount)
    # Select TCP Reno
    output = quietRun('sysctl -w net.ipv4.tcp_congestion_control=reno')
    assert 'reno' in output
    for datapath in switches.keys():
        info("*** testing", datapath, "datapath\n")
        Switch = switches[datapath]
        results[datapath] = []
        # Shaped links: 2 ms delay, 10 Mbit/s.
        link = partial(TCLink, delay='2ms', bw=10)
        net = Mininet(topo=topo, switch=Switch, controller=Controller,
                      waitConnected=True, link=link)
        net.start()
        info("*** testing basic connectivity\n")
        for n in lengths:
            net.ping([net.hosts[0], net.hosts[n]])
        info("*** testing bandwidth\n")
        for n in lengths:
            src, dst = net.hosts[0], net.hosts[n]
            # Try to prime the pump to reduce PACKET_INs during test
            # since the reference controller is reactive
            src.cmd('telnet', dst.IP(), '5001')
            info("testing", src.name, "<->", dst.name, '\n')
            # serverbw = received; _clientbw = buffered
            serverbw, _clientbw = net.iperf([src, dst], seconds=10)
            info(serverbw, '\n')
            flush()
            results[datapath] += [(n, serverbw)]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        info("\n*** Linear network results for", datapath, "datapath:\n")
        result = results[datapath]
        info("SwitchCount\tiperf Results\n")
        for switchCount, serverbw in result:
            info(switchCount, '\t\t')
            info(serverbw, '\n')
        info('\n')
    info('\n')
def createTopo(): global Fat_Tree_Level logging.debug("Create Topo") topo = CustomTopo() logging.debug("Start Mininet") CONTROLLER_IP = "127.0.0.1" CONTROLLER_PORT = 6633 net = Mininet(topo=topo, link=TCLink, controller=None) net.addController('controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT) net.start() time.sleep(3) for i in net.hosts: net.ping([net.hosts[0], i]) net.ping([net.hosts[1], i]) time.sleep(3) if not os.path.exists(sys.argv[1]): os.makedirs(sys.argv[1], mode=7777) for i in range(0, 2 * Fat_Tree_Level): print net.hosts[(-1) * (i + 1)].cmd('python ../UDPserver.py 5001 ' + sys.argv[1] + '/Server' + str(2 * Fat_Tree_Level - i) + '.log &') time.sleep(3) random.seed(a=5) global client_per_node a = random.sample(range(4 * Fat_Tree_Level), 4 * Fat_Tree_Level) print 'random sequence is : ' + str(a) for i in a: print net.hosts[i].cmd('python ../UDPclient.py ' + sys.argv[1] + '/client' + str(i) + '.csv ' + str(net.hosts[(-2) * Fat_Tree_Level].IP()) + ' 5001 &') time.sleep(3) print 'Main IP is :' print net.hosts[(-2) * Fat_Tree_Level].IP() print 'Other IPs are :' for i in range(0, 2 * Fat_Tree_Level): print net.hosts[(-1) * (i + 1)].IP() print 'time.sleep(60)' time.sleep(60) net.stop() os.system('sudo chmod 7777 ' + sys.argv[1] + ' ' + sys.argv[1] + '/*')
def createTopo():
    """Build the fat-tree topology and run UDP iperf traffic, ending in the CLI.

    Python 2 code (print statements). Servers run on the last
    2*Fat_Tree_Level hosts; clients are launched in a reproducible random
    order against a single server host.
    """
    global Fat_Tree_Level
    logging.debug("Create Topo")
    topo = CustomTopo()
    logging.debug("Start Mininet")
    CONTROLLER_IP = "127.0.0.1"
    CONTROLLER_PORT = 6633
    net = Mininet(topo=topo, link=TCLink, controller=None)
    net.addController('controller', controller=RemoteController,
                      ip=CONTROLLER_IP, port=CONTROLLER_PORT)
    net.start()
    time.sleep(3)
    #net.pingAll()
    # Ping every host from hosts[0] and hosts[1] to populate controller state.
    for i in net.hosts:
        net.ping([net.hosts[0], i])
        net.ping([net.hosts[1], i])
    time.sleep(3)
    #print net.hosts[1].cmd('tcpdump -w server1Log.dmp &')
    #print net.hosts[2].cmd('tcpdump -w server2Log.dmp &')
    #print net.hosts[3].cmd('tcpdump -w server3Log.dmp &')
    #time.sleep(1)
    # One UDP iperf server per tail host.
    for i in range(0, 2 * Fat_Tree_Level):
        print net.hosts[(-1) * (i + 1)].cmd('iperf -s -u -i 1 -p 5001 -w 2048K &')
    time.sleep(3)
    random.seed(a=5)  # fixed seed so client order is reproducible
    global client_per_node
    a = random.sample(range(4 * Fat_Tree_Level), 4 * Fat_Tree_Level)
    print 'random sequence is : ' + str(a)
    # Launch UDP clients in randomized order against one server host.
    for i in a:
        print net.hosts[i].cmd('iperf -c ' + str(net.hosts[(-2) * Fat_Tree_Level].IP()) + ' -p 5001 -i 1 -t 3000 -u -w 2048K &')
        time.sleep(1)
    print 'Main IP is :'
    print net.hosts[(-2) * Fat_Tree_Level].IP()
    print 'Other IPs are :'
    for i in range(0, 2 * Fat_Tree_Level):
        print net.hosts[(-1) * (i + 1)].IP()
    CLI(net)
    net.stop()
def linearBandwidthTest( lengths ):
    "Check bandwidth at various lengths along a switch chain."
    # lengths: iterable of chain lengths (hops) to measure.
    results = {}
    switchCount = max( lengths )
    hostCount = switchCount + 1
    switches = { 'reference user': UserSwitch,
                 'Open vSwitch kernel': OVSKernelSwitch }
    # UserSwitch is horribly slow with recent kernels.
    # We can reinstate it once its performance is fixed
    del switches[ 'reference user' ]
    topo = LinearTestTopo( hostCount )
    # Select TCP Reno
    output = quietRun( 'sysctl -w net.ipv4.tcp_congestion_control=reno' )
    assert 'reno' in output
    for datapath in switches.keys():
        print( "*** testing", datapath, "datapath" )
        Switch = switches[ datapath ]
        results[ datapath ] = []
        link = partial( TCLink, delay='1ms' )
        net = Mininet( topo=topo, switch=Switch, controller=Controller,
                       waitConnected=True, link=link )
        net.start()
        print( "*** testing basic connectivity" )
        for n in lengths:
            net.ping( [ net.hosts[ 0 ], net.hosts[ n ] ] )
        print( "*** testing bandwidth" )
        for n in lengths:
            src, dst = net.hosts[ 0 ], net.hosts[ n ]
            # Try to prime the pump to reduce PACKET_INs during test
            # since the reference controller is reactive
            src.cmd( 'telnet', dst.IP(), '5001' )
            print( "testing", src.name, "<->", dst.name )
            # bandwidth is a (server, client) pair from iperf.
            bandwidth = net.iperf( [ src, dst ], seconds=10 )
            print( bandwidth )
            flush()
            results[ datapath ] += [ ( n, bandwidth ) ]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        print()
        print( "*** Linear network results for", datapath, "datapath:" )
        print()
        result = results[ datapath ]
        print( "SwitchCount\tiperf Results" )
        for switchCount, bandwidth in result:
            print( switchCount, '\t\t' )
            print( bandwidth[ 0 ], 'server, ', bandwidth[ 1 ], 'client' )
        print()
        print()
def simpleTest():
    """Start the two-switch topology and ping the first two hosts.

    Python 2 code (print statements); uses a remote controller and TC links.
    """
    topo = DuoSwitchTopo()
    net = Mininet(topo=topo, controller=RemoteController, link=TCLink)
    net.start()
    print "Dumping host connections"
    dumpNodeConnections(net.hosts)
    print "Testing network connectivity"
    # net.pingAll()
    net.ping([net.hosts[0], net.hosts[1]])
    net.stop()
def bandwidthTest(algorithm, delay):
    "Check bandwidth at various lengths along a switch chain."
    # algorithm: TCP congestion-control algorithm name (e.g. 'reno', 'cubic');
    # delay: link-delay label used both for the topology and the log filenames.
    results = {}
    switches = {'reference user': UserSwitch,
                'Open vSwitch kernel': OVSKernelSwitch}
    del switches['reference user']
    topo = MyTopo(delay, n=3)
    # Select the requested congestion-control algorithm system-wide.
    output = quietRun("sysctl -w net.ipv4.tcp_congestion_control=" + algorithm)
    assert algorithm in output
    for datapath in switches.keys():
        info("*** testing", datapath, "datapath\n")
        Switch = switches[datapath]
        results[datapath] = []  # NOTE(review): populated nowhere; kept as-is
        link = partial(TCLink, delay='2ms', bw=10)
        net = Mininet(topo=topo, switch=Switch, controller=Controller,
                      waitConnected=True, link=link)
        net.start()
        info("*** testing basic connectivity\n")
        for n in range(0, 4):
            net.ping([net.hosts[0], net.hosts[n]])
        info("*** testing bandwidth\n")
        for n in range(2, 4):
            src, dst = net.hosts[0], net.hosts[n]
            # Try to prime the pump to reduce PACKET_INs during test
            # since the reference controller is reactive
            src.cmd('telnet', dst.IP(), '5001')
            info("testing", src.name, "<->", dst.name, '\n')
        # Two staggered iperf flows; the second starts 250 s after the first.
        t1 = threading.Thread(target=myIperf,
                              args=(net, (net.hosts[2], net.hosts[0]), 1000, 5001,
                                    "/home/mininet/projects/modlogs/" + algorithm + delay + "1.modlog"))
        t2 = threading.Thread(target=myIperf,
                              args=(net, (net.hosts[3], net.hosts[1]), 750, 5001,
                                    "/home/mininet/projects/modlogs/" + algorithm + delay + "2.modlog"))
        t1.start()
        time.sleep(250)
        t2.start()
        time.sleep(780)
        #net.stop()
    info('\n')
    info('\n')
def linearBandwidthTest(lengths):
    "Check bandwidth at various lengths along a switch chain."
    # Python 2 code (print statements). lengths: chain lengths to measure.
    results = {}
    switchCount = max(lengths)
    hostCount = switchCount + 1
    switches = {'reference user': UserSwitch,
                'Open vSwitch kernel': OVSKernelSwitch}
    # UserSwitch is horribly slow with recent kernels.
    # We can reinstate it once its performance is fixed
    del switches['reference user']
    topo = LinearTestTopo(hostCount)
    for datapath in switches.keys():
        print "*** testing", datapath, "datapath"
        Switch = switches[datapath]
        results[datapath] = []
        link = partial(TCLink, delay='1ms')
        net = Mininet(topo=topo, switch=Switch, controller=Controller,
                      waitConnected=True, link=link)
        net.start()
        print "*** testing basic connectivity"
        for n in lengths:
            net.ping([net.hosts[0], net.hosts[n]])
        print "*** testing bandwidth"
        for n in lengths:
            src, dst = net.hosts[0], net.hosts[n]
            print "testing", src.name, "<->", dst.name,
            # bandwidth is a (server, client) pair from iperf.
            bandwidth = net.iperf([src, dst])
            print bandwidth
            flush()
            results[datapath] += [(n, bandwidth)]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        print
        print "*** Linear network results for", datapath, "datapath:"
        print
        result = results[datapath]
        print "SwitchCount\tiperf Results"
        for switchCount, bandwidth in result:
            print switchCount, '\t\t',
            print bandwidth[0], 'server, ', bandwidth[1], 'client'
        print
        print
def testPing(self):
    "Create the network and run a ping test"
    # Builds the topology from self.topoClass with fixed parameters and the
    # controller stored on self, then pings all hosts and runs a custom iperf.
    net = Mininet(topo=self.topoClass(2, 2, 4, 2), controller=self.controller,
                  host=Host, switch=OVSSwitch, link=TCLink, waitConnected=True)
    net.start()
    net.ping()
    # net.ping([net.hosts[0], "10.0.0.2"])
    # NOTE(review): net.stop() is never called — the network is left running;
    # confirm whether teardown happens elsewhere.
    net.myiperf(net.hosts, 10)
def linearBandwidthTest( lengths ):
    "Check bandwidth at various lengths along a switch chain."
    # Python 2 code (print statements). lengths: chain lengths to measure.
    results = {}
    switchCount = max( lengths )
    hostCount = switchCount + 1
    switches = { 'reference user': UserSwitch,
                 'Open vSwitch kernel': OVSKernelSwitch }
    # UserSwitch is horribly slow with recent kernels.
    # We can reinstate it once its performance is fixed
    del switches[ 'reference user' ]
    topo = LinearTestTopo( hostCount )
    for datapath in switches.keys():
        print "*** testing", datapath, "datapath"
        Switch = switches[ datapath ]
        results[ datapath ] = []
        link = partial( TCLink, delay='1ms' )
        net = Mininet( topo=topo, switch=Switch, controller=Controller,
                       waitConnected=True, link=link )
        net.start()
        print "*** testing basic connectivity"
        for n in lengths:
            net.ping( [ net.hosts[ 0 ], net.hosts[ n ] ] )
        print "*** testing bandwidth"
        for n in lengths:
            src, dst = net.hosts[ 0 ], net.hosts[ n ]
            print "testing", src.name, "<->", dst.name,
            # bandwidth is a (server, client) pair from iperf.
            bandwidth = net.iperf( [ src, dst ] )
            print bandwidth
            flush()
            results[ datapath ] += [ ( n, bandwidth ) ]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        print
        print "*** Linear network results for", datapath, "datapath:"
        print
        result = results[ datapath ]
        print "SwitchCount\tiperf Results"
        for switchCount, bandwidth in result:
            print switchCount, '\t\t',
            print bandwidth[ 0 ], 'server, ', bandwidth[ 1 ], 'client'
        print
        print
def createTopo():
    """Build the custom topology and run UDP iperf traffic, ending in the CLI.

    Python 2 code (print statements). Servers run on the last three hosts;
    clients are launched in a reproducible random order against hosts[-3].
    """
    logging.debug("Create Topo")
    topo = CustomTopo()
    logging.debug("Start Mininet")
    CONTROLLER_IP = "127.0.0.1"
    CONTROLLER_PORT = 6633
    net = Mininet(topo=topo, link=TCLink, controller=None)
    net.addController('controller', controller=RemoteController,
                      ip=CONTROLLER_IP, port=CONTROLLER_PORT)
    net.start()
    time.sleep(3)
    #net.pingAll()
    print '***********************'
    # just like pingall
    for i in net.hosts:
        net.ping([net.hosts[0], i])
        net.ping([net.hosts[1], i])
    print '***********************'
    time.sleep(3)
    #print net.hosts[1].cmd('tcpdump -w server1Log.dmp &')
    #print net.hosts[2].cmd('tcpdump -w server2Log.dmp &')
    #print net.hosts[3].cmd('tcpdump -w server3Log.dmp &')
    #time.sleep(1)
    # One UDP iperf server on each of the last three hosts.
    print net.hosts[-1].cmd('iperf -s -u -i 1 -p 5001 -w 2048K &')
    print net.hosts[-2].cmd('iperf -s -u -i 1 -p 5001 -w 2048K &')
    print net.hosts[-3].cmd('iperf -s -u -i 1 -p 5001 -w 2048K &')
    time.sleep(3)
    random.seed(a=5)  # fixed seed so client order is reproducible
    global client_per_node
    a = random.sample(range(3 * client_per_node), 3 * client_per_node)
    print a
    #print net.hosts[0].cmd('iperf -c '+str(net.hosts[-3].IP())+' -p 5001 -i 1 -t 3000 -u -w 2048K &')
    # Launch UDP clients in randomized order against hosts[-3].
    for i in a:
        print net.hosts[i].cmd('iperf -c ' + str(net.hosts[-3].IP()) + ' -p 5001 -i 1 -t 3000 -u -w 2048K &')
        time.sleep(1)
    print net.hosts[-3].IP()
    CLI(net)
    net.stop()
def startNetwork():
    """Start the Quagga router network, test connectivity, and open the CLI.

    Stores the network in the module-level ``net`` so other functions
    (e.g. a stop handler) can reach it.
    """
    info( '** Creating Quagga network\n' )
    topo = QuaggaTopo()
    global net
    net = Mininet(topo, controller=None )
    net.start()
    info( '** Dumping host connections\n' )
    dumpNodeConnections(net.legacyRouters)
    info( '** Testing network connectivity\n' )
    net.ping(net.legacyRouters)
    info( '** Running CLI\n' )
    CLI( net )
def createTopo(): logging.debug("Create Topo") topo = CustomTopo() logging.debug("Start Mininet") CONTROLLER_IP = "127.0.0.1" CONTROLLER_PORT = 6633 net = Mininet(topo=topo, link=TCLink, controller=None) net.addController('controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT) net.start() time.sleep(1) for i in net.hosts: net.ping([net.hosts[0], i]) net.ping([net.hosts[1], i]) time.sleep(3) if not os.path.exists(sys.argv[1]): os.makedirs(sys.argv[1], mode=7777) print net.hosts[-3].cmd('python ../server.py 5001 ' + sys.argv[1] + '/Server' + str(1) + '.log &') print net.hosts[-2].cmd('python ../server.py 5001 ' + sys.argv[1] + '/Server' + str(2) + '.log &') print net.hosts[-1].cmd('python ../server.py 5001 ' + sys.argv[1] + '/Server' + str(3) + '.log &') time.sleep(1) random.seed(a=5) global client_per_node a = random.sample(range(3 * client_per_node), 3 * client_per_node) print a for i in a: print net.hosts[i].cmd('python ../client.py ' + sys.argv[1] + '/client' + str(i) + '.csv ' + str(net.hosts[-3].IP()) + ' 5001 &') time.sleep(3) print net.hosts[-3].IP() #CLI(net) print 'time.sleep(60)' time.sleep(60) net.stop() os.system('sudo chmod 7777 ' + sys.argv[1] + ' ' + sys.argv[1] + '/*')
def testLinkLoss( self ):
    "Verify that we see packet drops with a high configured loss rate."
    LOSS_PERCENT = 99
    REPS = 1
    lopts = { 'loss': LOSS_PERCENT, 'use_htb': True }
    mn = Mininet( topo=SingleSwitchOptionsTopo( n=N, lopts=lopts ),
                  host=CPULimitedHost, link=TCLink,
                  switch=self.switchClass,
                  waitConnected=True )
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    dropped_total = 0
    mn.start()
    for _ in range(REPS):
        dropped_total += mn.ping(timeout='1')
    mn.stop()
    # Build a detailed failure message describing the full test setup.
    loptsStr = ', '.join( '%s: %s' % ( opt, value )
                          for opt, value in lopts.items() )
    msg = ( '\nTesting packet loss with %d%% loss rate\n'
            'number of dropped pings during mininet.ping(): %s\n'
            'expected number of dropped packets: 1\n'
            'Topo = SingleSwitchTopo, %s hosts\n'
            'Link = TCLink\n'
            'lopts = %s\n'
            'host = default\n'
            'switch = %s\n'
            % ( LOSS_PERCENT, dropped_total, N, loptsStr, self.switchClass ) )
    self.assertGreater( dropped_total, 0, msg )
def openSwitchNet():
    """Build a small OVS network and shape s0-eth0 with an HTB class tree.

    NOTE(review): the switch/host/link creation below is only placeholder
    comments — ``s0`` is referenced later but never created here, so this
    function cannot run as-is; the creation code must be filled in first.
    """
    net = Mininet( controller=Controller, switch=OVSSwitch, link=TCLink )
    info( "*** Creating (reference) controllers\n" )
    c = net.addController( 'c1', port=6633 )
    info( "*** Creating switches\n" )
    #add switch here
    #add h0,h1,h2 here
    info( "*** Creating hosts\n" )
    #add link h0-s0 include interface s0-eth
    info( "*** Creating links\n" )
    info( "*** Starting network\n" )
    net.build()
    c.start()
    s0.start([c])
    # Ping All Host
    info( '\n', net.ping() ,'\n' )
    # Set Queue Discipline to htb
    info( '\n*** Queue Disicline :\n' )
    # reset queue discipline
    s0.cmdPrint( 'tc qdisc del dev s0-eth0 root' )
    # add queue discipline root
    s0.cmdPrint( 'tc qdisc add dev s0-eth0 root handle 1:0 htb ' )
    #Add classs for root
    s0.cmdPrint( 'tc class add dev s0-eth0 parent 1: classid 1:1 htb rate 10Mbit ' )
    # add queue dicipline classes
    s0.cmdPrint( 'tc class add dev s0-eth0 parent 1:1 classid 1:2 htb rate 4Mbit ceil 3Mbit ' )
    s0.cmdPrint( 'tc class add dev s0-eth0 parent 1:1 classid 1:3 htb rate 4Mbit ceil 1Mbit ' )
    #Add pfifo queuing
    s0.cmdPrint( ' tc class add dev s0-eth0 parent 1:2 classid 1:21 htb rate 2Mbit ceil 2Mbit')
    s0.cmdPrint( ' tc class add dev s0-eth0 parent 1:3 classid 1:31 htb rate 3Mbit ceil 2Mbit')
    s0.cmdPrint( ' tc qdisc add dev s0-eth0 parent 1:21 handle 210: pfifo limit 20')
    s0.cmdPrint( ' tc qdisc add dev s0-eth0 parent 1:31 handle 310: pfifo limit 10')
    # add queue dicipline filters (can use port to divide based on data delivery port)
    s0.cmdPrint( 'tc filter add dev s0-eth0 parent 1: protocol ip prio 1 u32 match ip src '+net[ 'h0' ].IP()+' flowid 1:21' )
    s0.cmdPrint( 'tc filter add dev s0-eth0 parent 1: protocol ip prio 1 u32 match ip src '+net[ 'h1' ].IP()+' flowid 1:31' )
    s0.cmdPrint( 'tc qdisc show dev s0-eth0' )
    info( '\n' )
    # Test Iperf
    testIperf( net, 'h0', ('h1', 'h2') )
    # Stop Network
    net.stop()
def test_unit_launch_network(self):
    """Launch a 14-host single-switch network under a remote controller and
    assert full connectivity (0% loss) among all hosts except h10.
    """
    # BUG FIX: if Mininet(...) raised, 'net' was unbound and the finally
    # clause raised NameError, masking the original failure. Initialise it
    # and guard the stop() call.
    net = None
    try:
        net = Mininet(autoSetMacs=True, cleanup=True)
        s1 = net.addSwitch('s1')
        for n in range(1, 15):
            h = net.addHost('h%s' % n)
            net.addLink(h, s1)
        net.addController('c0', controller=RemoteController,
                          ip='127.0.0.1', port=6653)
        net.start()
        time.sleep(5)  # give the controller time to install flows
        # h10 is deliberately excluded from the connectivity check.
        hosts = []
        for i in range(1, 15):
            if i != 10:
                hosts.append(net.get('h' + str(i)))
        self.assertEqual(net.ping(hosts), 0.0)
    finally:
        if net is not None:
            net.stop()
def testLinkLoss(self):
    "Verify that we see packet drops with a high configured loss rate."
    LOSS_PERCENT = 99
    REPS = 1
    lopts = {'loss': LOSS_PERCENT, 'use_htb': True}
    mn = Mininet(topo=SingleSwitchOptionsTopo(n=N, lopts=lopts),
                 host=CPULimitedHost, link=TCLink,
                 switch=self.switchClass,
                 waitConnected=True)
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    dropped_total = 0
    mn.start()
    for _ in range(REPS):
        dropped_total += mn.ping(timeout='1')
    mn.stop()
    # Build a detailed failure message describing the full test setup.
    loptsStr = ', '.join('%s: %s' % (opt, value)
                         for opt, value in lopts.items())
    msg = ('\nTesting packet loss with %d%% loss rate\n'
           'number of dropped pings during mininet.ping(): %s\n'
           'expected number of dropped packets: 1\n'
           'Topo = SingleSwitchTopo, %s hosts\n'
           'Link = TCLink\n'
           'lopts = %s\n'
           'host = default\n'
           'switch = %s\n'
           % (LOSS_PERCENT, dropped_total, N, loptsStr, self.switchClass))
    self.assertGreater(dropped_total, 0, msg)
def main():
    """Start a single P4 switch with two hosts, configure static ARP/routes,
    then either drop into the CLI or run a full-mesh ping check.

    Python 2 code (print statements). Returns the ping loss result
    (0 on full connectivity).
    """
    behavioral_model = os.path.join(sys.path[0], '../targets/switch/behavioral-model')
    topo = SingleSwitchTopo(behavioral_model)
    net = Mininet(topo=topo, host=P4Host, switch=OpenflowEnabledP4Switch,
                  controller=None)
    net.start()
    # Static ARP + default route per host (the P4 switch does not answer ARP).
    h1 = net.get('h1')
    h1.setARP("10.0.0.1", "00:aa:bb:00:00:00")
    h1.setDefaultRoute("dev eth0 via 10.0.0.1")
    h1.describe()
    h2 = net.get('h2')
    h2.setARP("10.0.1.1", "00:aa:bb:00:00:01")
    h2.setDefaultRoute("dev eth0 via 10.0.1.1")
    h2.describe()
    configure_switch()
    time.sleep(1)
    print "Ready !"
    result = 0
    if parser_args.cli:
        CLI(net)
    else:
        time.sleep(3)
        node_values = net.values()
        print node_values
        hosts = net.hosts
        print hosts
        # ping hosts
        print "PING BETWEEN THE HOSTS"
        result = net.ping(hosts, 30)
        # print host arp table & routes
        for host in hosts:
            print "ARP ENTRIES ON HOST"
            print host.cmd('arp -n')
            print "HOST ROUTES"
            print host.cmd('route')
            print "HOST INTERFACE LIST"
            intfList = host.intfNames()
            print intfList
        if result != 0:
            print "PING FAILED BETWEEN HOSTS %s" % (hosts)
        else:
            print "PING SUCCESSFUL!!!"
    net.stop()
    return result
def linearBandwidthTest(lengths):
    "Check bandwidth at various lengths along a switch chain."
    # Python 2 code (print statements). Unlike the TCLink variants, this
    # version uses default links and keeps both switch classes.
    results = {}
    switchCount = max(lengths)
    hostCount = switchCount + 1
    switches = {'reference user': UserSwitch,
                'Open vSwitch kernel': OVSKernelSwitch}
    topo = LinearTestTopo(hostCount)
    for datapath in switches.keys():
        print "*** testing", datapath, "datapath"
        Switch = switches[datapath]
        results[datapath] = []
        net = Mininet(topo=topo, switch=Switch)
        net.start()
        print "*** testing basic connectivity"
        for n in lengths:
            net.ping([net.hosts[0], net.hosts[n]])
        print "*** testing bandwidth"
        for n in lengths:
            src, dst = net.hosts[0], net.hosts[n]
            print "testing", src.name, "<->", dst.name,
            # bandwidth is a (server, client) pair from iperf.
            bandwidth = net.iperf([src, dst])
            print bandwidth
            flush()
            results[datapath] += [(n, bandwidth)]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        print
        print "*** Linear network results for", datapath, "datapath:"
        print
        result = results[datapath]
        print "SwitchCount\tiperf Results"
        for switchCount, bandwidth in result:
            print switchCount, '\t\t',
            print bandwidth[0], 'server, ', bandwidth[1], 'client'
        print
        print
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Faucet test: VLAN 100 (tagged, ports 1-2) and VLAN 101 (untagged,
    ports 3-4) must be isolated from each other.
    """

    CONFIG = CONFIG_HEADER + """
interfaces:
    1:
        tagged_vlans: [100]
        description: "b1"
    2:
        tagged_vlans: [100]
        description: "b2"
    3:
        native_vlan: 101
        description: "b3"
    4:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Start a Mininet with 2 tagged + 2 untagged hosts under FAUCET."""
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()

    def test_seperate_untagged_tagged(self):
        # NOTE(review): [0:1] and [2:3] are one-element slices; if a two-host
        # pair per VLAN was intended, [0:2] and [2:4] would be needed — confirm.
        tagged_host_pair = self.net.hosts[0:1]
        untagged_host_pair = self.net.hosts[2:3]
        # hosts within VLANs can ping each other
        self.assertEquals(0, self.net.ping(tagged_host_pair))
        self.assertEquals(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEquals(100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))

    def tearDown(self):
        """Stop the network before the base-class teardown."""
        self.net.stop()
        super(FaucetTaggedAndUntaggedTest, self).tearDown()
        time.sleep(1)
def main():
    """Start a single P4 switch with two hosts, configure static ARP/routes,
    then either drop into the CLI or run a full-mesh ping check.

    Python 2 code (print statements). Returns the ping loss result
    (0 on full connectivity).
    """
    behavioral_model = os.path.join(sys.path[0], '../targets/switch/behavioral-model')
    topo = SingleSwitchTopo(behavioral_model)
    net = Mininet(topo=topo, host=P4Host, switch=OpenflowEnabledP4Switch,
                  controller=None )
    net.start()
    # Static ARP + default route per host (the P4 switch does not answer ARP).
    h1 = net.get('h1')
    h1.setARP("10.0.0.1", "00:aa:bb:00:00:00")
    h1.setDefaultRoute("dev eth0 via 10.0.0.1")
    h1.describe()
    h2 = net.get('h2')
    h2.setARP("10.0.1.1", "00:aa:bb:00:00:01")
    h2.setDefaultRoute("dev eth0 via 10.0.1.1")
    h2.describe()
    configure_switch()
    time.sleep(1)
    print "Ready !"
    result = 0
    if parser_args.cli:
        CLI( net )
    else:
        time.sleep(3)
        node_values = net.values()
        print node_values
        hosts = net.hosts
        print hosts
        # ping hosts
        print "PING BETWEEN THE HOSTS"
        result = net.ping(hosts,30)
        # print host arp table & routes
        for host in hosts:
            print "ARP ENTRIES ON HOST"
            print host.cmd('arp -n')
            print "HOST ROUTES"
            print host.cmd('route')
            print "HOST INTERFACE LIST"
            intfList = host.intfNames()
            print intfList
        if result != 0:
            print "PING FAILED BETWEEN HOSTS %s" % (hosts)
        else:
            print "PING SUCCESSFUL!!!"
    net.stop()
    return result
def linearBandwidthTest( lengths ):
    "Check bandwidth at various lengths along a switch chain."
    # Python 2 code (print statements). Default links; both switch classes.
    results = {}
    switchCount = max( lengths )
    hostCount = switchCount + 1
    switches = { 'reference user': UserSwitch,
                 'Open vSwitch kernel': OVSKernelSwitch }
    topo = LinearTestTopo( hostCount )
    for datapath in switches.keys():
        print "*** testing", datapath, "datapath"
        Switch = switches[ datapath ]
        results[ datapath ] = []
        net = Mininet( topo=topo, switch=Switch )
        net.start()
        print "*** testing basic connectivity"
        for n in lengths:
            net.ping( [ net.hosts[ 0 ], net.hosts[ n ] ] )
        print "*** testing bandwidth"
        for n in lengths:
            src, dst = net.hosts[ 0 ], net.hosts[ n ]
            print "testing", src.name, "<->", dst.name,
            # bandwidth is a (server, client) pair from iperf.
            bandwidth = net.iperf( [ src, dst ] )
            print bandwidth
            flush()
            results[ datapath ] += [ ( n, bandwidth ) ]
        net.stop()
    # Summarise per-datapath results.
    for datapath in switches.keys():
        print
        print "*** Linear network results for", datapath, "datapath:"
        print
        result = results[ datapath ]
        print "SwitchCount\tiperf Results"
        for switchCount, bandwidth in result:
            print switchCount, '\t\t',
            print bandwidth[ 0 ], 'server, ', bandwidth[ 1 ], 'client'
        print
        print
def testDecisionTopology():
    "Create network and run simple performance test"
    # Python 2 code (print statements, xrange). Exercises pairwise
    # connectivity/bandwidth, then drives a REST endpoint to switch flows
    # and measures h1<->h6 under each setting.
    topo = SpecialTopo(100, 10)
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
                  controller=RemoteController)
    net.start()
    time.sleep(10)  # let the remote controller settle
    h1, h2, h3, h4, h5, h6 = net.hosts
    pairs = [(h1, h2), (h2, h3), (h4, h5), (h5, h6), (h1, h4),
             (h2, h5), (h3, h6), (h2, h4), (h3, h5)]
    for a, b in pairs:
        net.ping([a, b])
        net.iperf([a, b])
    # Ask the local REST service to install each of the 4 flow settings,
    # then measure latency and UDP bandwidth between the edge hosts.
    for i in xrange(4):
        j = requests.get("http://localhost:8080/set?f={}".format(i)).json()
        print j["message"]
        print test_pings(h1, h6, 40)
        print net.iperf([h1, h6], l4Type='UDP', udpBw='10M')
        time.sleep(20)
    CLI(net)
    net.stop()
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Faucet test: VLAN 100 (tagged) and VLAN 101 (untagged) must be
    isolated. Port numbers are substituted from PORT_MAP at setUp time.
    """

    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        native_vlan: 101
        description: "b3"
    %(port_4)d:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Substitute real port numbers into CONFIG, then start the network."""
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        # Wait until FAUCET has installed its controller-punt flow.
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_seperate_untagged_tagged(self):
        # NOTE(review): [0:1] and [2:3] are one-element slices; if a two-host
        # pair per VLAN was intended, [0:2] and [2:4] would be needed — confirm.
        tagged_host_pair = self.net.hosts[0:1]
        untagged_host_pair = self.net.hosts[2:3]
        # hosts within VLANs can ping each other
        self.assertEquals(0, self.net.ping(tagged_host_pair))
        self.assertEquals(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEquals(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Faucet test: VLAN 100 (tagged) and VLAN 101 (untagged) must be
    isolated. Port numbers are substituted from PORT_MAP at setUp time.
    """

    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        native_vlan: 101
        description: "b3"
    %(port_4)d:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Substitute real port numbers into CONFIG, then start the network."""
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        # Wait until FAUCET has installed its controller-punt flow.
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_seperate_untagged_tagged(self):
        # NOTE(review): [0:1] and [2:3] are one-element slices; if a two-host
        # pair per VLAN was intended, [0:2] and [2:4] would be needed — confirm.
        tagged_host_pair = self.net.hosts[0:1]
        untagged_host_pair = self.net.hosts[2:3]
        # hosts within VLANs can ping each other
        self.assertEquals(0, self.net.ping(tagged_host_pair))
        self.assertEquals(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEquals(100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
def startNetwork():
    """Start the Quagga router network, test connectivity, dump each
    router's BGP neighbor summary, and open the CLI.

    Stores the network in the module-level ``net`` so other functions can
    reach it.
    """
    info( '** Creating Quagga network\n' )
    topo = QuaggaTopo()
    global net
    net = Mininet(topo, controller=None )
    net.start()
    info( '** Dumping host connections\n' )
    dumpNodeConnections(net.legacyRouters)
    info( '** Testing network connectivity\n' )
    net.ping(net.legacyRouters)
    info( '** Collecting BGP neighbors\n' )
    # Query each router's BGP state via the Quagga vtysh shell.
    for router in net.legacyRouters:
        quagga_cmd = "show ip bgp summary"
        result = router.cmd('vtysh -c \"%s\"' % quagga_cmd)
        info("*** %s:\n%s" % (router, result))
    info( '** Running CLI\n' )
    CLI( net )
def startNetwork():
    """Start the Quagga router network, test connectivity, dump each
    router's BGP neighbor summary, and open the CLI.

    Stores the network in the module-level ``net`` so other functions can
    reach it.
    """
    info('** Creating Quagga network\n')
    topo = QuaggaTopo()
    global net
    net = Mininet(topo, controller=None)
    net.start()
    info('** Dumping host connections\n')
    dumpNodeConnections(net.legacyRouters)
    info('** Testing network connectivity\n')
    net.ping(net.legacyRouters)
    info('** Collecting BGP neighbors\n')
    # Query each router's BGP state via the Quagga vtysh shell.
    for router in net.legacyRouters:
        quagga_cmd = "show ip bgp summary"
        result = router.cmd('vtysh -c \"%s\"' % quagga_cmd)
        info("*** %s:\n%s" % (router, result))
    info('** Running CLI\n')
    CLI(net)
def test_link(capsys):
    """
    Test connectivity between local and remote host via link
    (Without link there is no connectivity)
    :param capsys: just capture the std(out/err)
    :return:
    """
    remote = "mininet_host"
    username = "******"
    net = Mininet(host=RemoteHost, link=RemoteLink)
    # h2 lives on the remote server; h1 and h3 are local. Only h1-h2 are
    # linked, so h1-h3 must show 100% loss and h1-h2 must show 0%.
    local_a = net.addHost('h1')
    remote_b = net.addHost('h2', serverIp=remote, user=username)
    local_c = net.addHost('h3')
    net.addLink(local_a, remote_b)
    net.start()
    with capsys.disabled():
        print("Test connectivity between h1 and h3 (not connected)")
        assert net.ping([local_a, local_c]) == 100
        print("Done\nTest connectivity between h1 and h2 (connected)")
        assert net.ping([local_a, remote_b]) == 0
        print('Done')
def perfTest(): topo = MyTopo() net = Mininet(topo, controller=partial(RemoteController, ip='192.168.1.1', port=6633), link=TCLink, host=CPULimitedHost) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" h1,h2 = net.getNodeByName('h1', 'h2') h3,h20 = net.getNodeByName('h3', 'h20') net.ping((h1, h2)) net.ping((h3, h20)) sleep(2) print "Executing the program to for Installing Flows" os.system('python Flow_Install.py') print "Testing bandwidth between h1 and h2" h1,h2 = net.getNodeByName('h1', 'h2') net.iperf((h1, h2)) print "Testing bandwidth between h3 and h20" h3,h20 = net.getNodeByName('h3', 'h20') net.iperf((h3, h20)) sleep(10) print "Testing bandwidth between h1 and h2" h1,h2 = net.getNodeByName('h1', 'h2') net.iperf((h1, h2)) net.stop() print "Testing bandwidth between h3 and h20" h3,h20 = net.getNodeByName('h3', 'h20') net.iperf((h3, h20)) net.stop()
def simpleTest(): "Create and test a simple network" topo = TreeTopo(depth=1, fanout=2) net = Mininet(topo, controller=RemoteController) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" #ping_all_cmd = "fping -t 10 -l -p 5000 " + " ".join([host.IP() for host in net.hosts])+" > /tmp/%s_logs.txt &" #for host in net.hosts: # host.cmd(ping_all_cmd%host.name) #print(dir(host)) for host in net.hosts: term.makeTerm(host) while True: net.ping(timeout=20) time.sleep(6) net.stop()
def testLinkLoss(self):
    "Verify that we see packet drops with a high configured loss rate."
    LOSS_PERCENT = 99
    REPS = 1
    lopts = {"loss": LOSS_PERCENT, "use_htb": True}
    mn = Mininet(topo=SingleSwitchOptionsTopo(n=N, lopts=lopts),
                 host=CPULimitedHost, link=TCLink)
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    dropped_total = 0
    mn.start()
    for _ in range(REPS):
        dropped_total += mn.ping(timeout="1")
    mn.stop()
    self.assertTrue(dropped_total > 0)
def perfTest():
    """Spin up a single-switch network behind a remote controller, check
    connectivity, then measure iperf bandwidth between h1 and h4."""
    info('** Creating network and run simple performance test\n')
    # modify the ip address if you are using a remote pox controller
    def controller_factory(name):
        return RemoteController(name, ip='127.0.0.1')
    network = Mininet(topo=SingleSwitchTopo(n=4),
                      link=Link,
                      controller=controller_factory,
                      listenPort=6633,
                      autoSetMacs=True)
    network.start()
    info("Dumping host connections")
    dumpNodeConnections(network.hosts)
    info("Testing network connectivity")
    network.pingAll()
    h1, h4 = network.get('h1', 'h4')
    info("Testing connectivity between h1 and h4")
    network.ping((h1, h4))
    info("Testing bandwidth between h1 and h4")
    network.iperf((h1, h4), port=8080)
    network.stop()
def main(): net = Mininet(controller=None) sw1 = net.addSwitch('sw1', cls=P4DockerSwitch, target_name="p4openflowswitch", start_program="/bin/bash") h1 = net.addHost('h1', ip='10.0.0.1', mac='00:04:00:00:00:02') h2 = net.addHost('h2', ip='10.0.0.2', mac='00:05:00:00:00:02') # add links if StrictVersion(VERSION) <= StrictVersion('2.2.0'): net.addLink(sw1, h1, port1=1) net.addLink(sw1, h2, port1=2) else: net.addLink(sw1, h1, port1=1, fast=False) net.addLink(sw1, h2, port1=2, fast=False) sw1.execProgram("/switch/docker/startup.sh", args="--of-ip %s" % parser_args.controller_ip) time.sleep(1) net.start() print "Ready !" result = 0 time.sleep(3) if parser_args.cli: CLI(net) else: node_values = net.values() print node_values hosts = net.hosts print hosts # ping hosts print "PING BETWEEN THE HOSTS" result = net.ping(hosts, 30) if result != 0: print "PING FAILED BETWEEN HOSTS %s" % (hosts) else: print "PING SUCCESSFUL!!!" net.stop() return result
def createTopo():
    """Build the custom topology, attach a remote controller, exercise
    connectivity from the first two hosts toward every host, then hand
    control to the Mininet CLI."""
    logging.debug("Create Topo")
    topology = CustomTopo()
    logging.debug("Start Mininet")
    controller_ip = "127.0.0.1"
    controller_port = 6633
    net = Mininet(topo=topology, link=TCLink, controller=None)
    net.addController('controller', controller=RemoteController,
                      ip=controller_ip, port=controller_port)
    net.start()
    time.sleep(1)
    first, second = net.hosts[0], net.hosts[1]
    for host in net.hosts:
        net.ping([first, host])
        net.ping([second, host])
    CLI(net)
    net.stop()
def testLinkLoss( self ):
    "Verify that we see packet drops with a high configured loss rate."
    loss_percent = 99
    reps = 1
    topo = SingleSwitchOptionsTopo(
        n=N, lopts={ 'loss': loss_percent, 'use_htb': True } )
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    net = Mininet( topo=topo, host=CPULimitedHost, link=TCLink,
                   switch=self.switchClass )
    net.start()
    dropped_total = 0
    for _ in range( reps ):
        dropped_total += net.ping( timeout='1' )
    net.stop()
    self.assertTrue( dropped_total > 0 )
def main(): net = Mininet( controller=None ) sw1 = net.addSwitch( 'sw1', cls=P4DockerSwitch, target_name="p4openflowswitch", start_program="/bin/bash") h1 = net.addHost( 'h1', ip = '10.0.0.1', mac = '00:04:00:00:00:02' ) h2 = net.addHost( 'h2', ip = '10.0.0.2', mac = '00:05:00:00:00:02' ) # add links if StrictVersion(VERSION) <= StrictVersion('2.2.0') : net.addLink( sw1, h1, port1 = 1 ) net.addLink( sw1, h2, port1 = 2 ) else: net.addLink( sw1, h1, port1 = 1, fast=False ) net.addLink( sw1, h2, port1 = 2, fast=False ) sw1.execProgram("/switch/docker/startup.sh", args="--of-ip %s" % parser_args.controller_ip) time.sleep(1) net.start() print "Ready !" result = 0 time.sleep(3) if parser_args.cli: CLI(net) else: node_values = net.values() print node_values hosts = net.hosts print hosts # ping hosts print "PING BETWEEN THE HOSTS" result = net.ping(hosts,30) if result != 0: print "PING FAILED BETWEEN HOSTS %s" % (hosts) else: print "PING SUCCESSFUL!!!" net.stop() return result
def testLinkLoss( self ):
    "Verify that we see packet drops with a high configured loss rate."
    LOSS_PERCENT = 99
    REPS = 1
    topo = SingleSwitchOptionsTopo(
        n=N, lopts=dict( loss=LOSS_PERCENT, use_htb=True ) )
    net = Mininet( topo=topo, host=CPULimitedHost, link=TCLink,
                   switch=self.switchClass, waitConnected=True )
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    net.start()
    total_dropped = 0
    remaining = REPS
    while remaining:
        total_dropped += net.ping( timeout='1' )
        remaining -= 1
    net.stop()
    self.assertGreater( total_dropped, 0 )
class MininetMachine(object):
    """Thin lifecycle wrapper around a Mininet network built from a
    ComplexTopo: create, ping, inspect and tear down."""

    def __init__(self):
        # Network and topology are created lazily in start().
        self._net = None
        self._topo = None

    @staticmethod
    def _clean():
        # Remove leftovers of any previous Mininet run.
        clean.cleanup()

    def start(self, switch_count, hosts_per_switch, controller_ip,
              controller_port):
        """
        Creates mininet network
        :param switch_count: Number of switches in network
        :param hosts_per_switch: Hosts per every switch
        :param controller_ip: Controller IP
        :param controller_port: Controller Port
        :return Created network object
        """
        self._clean()
        setLogLevel('info')
        self._net = Mininet()
        self._topo = ComplexTopo(self._net, switch_count, hosts_per_switch,
                                 controller_ip, controller_port)
        self._topo.build_network()
        dumpNodeConnections(self._net.hosts)
        self._net.start()

    def ping_all(self):
        """Ping between every pair of hosts in the network."""
        self._net.pingAll()

    def ping_hosts(self, hosts):
        """Ping among the given hosts; returns the packet-loss percentage."""
        return self._net.ping(hosts)

    def get_hosts(self):
        """Return the hosts known to the topology."""
        return self._topo.get_hosts()

    def terminate(self):
        """Shut the network down."""
        self._net.stop()
def run():
    """Start a fat-tree network, install precomputed OVS flow rules on every
    switch, ping the selected source/destination pairs, measure them with
    iperf and finally drop into the Mininet CLI."""
    network = Mininet(topo=FatTree(6), link=TCLink, controller=None)
    network.start()
    # sysctl -w net.ipv4.neigh.default.gc_thresh1=4098 # increase the arp table cache
    print("ARP TABLES")
    network.staticArp()
    fat_tree = FatTreeConverted.from_mininet(network)
    rules, pairs = fat_tree.getRules()
    ovs_rules = fat_tree.getOVSRules(rules=rules)
    # Push every rule to its switch via ovs-ofctl.
    for switch in network.switches:
        print(switch.name)
        for rule in ovs_rules[switch.name]:
            command = "bash -c 'ovs-ofctl add-flow {} {}'".format(
                switch.name, rule)
            print(switch.name, command)
            switch.cmd(command)
        print()
    print(ovs_rules)
    # Ping every selected pair; abort the loop on the first lossy pair.
    for src, dst in pairs:
        print(src, dst)
        loss = network.ping(
            [network.getNodeByName(src), network.getNodeByName(dst)])
        if loss > 0:
            print(src, dst, "PACKET LOSS!")
            break
        print(loss)
    results = iperf(network, pairs)
    print(results)
    CLI(network)
    network.stop()
class Demo:
    # Orchestrates a scripted multipath-streaming demo: it starts a
    # controller, builds a Mininet network, streams a video with VLC over
    # RTP, congests a subpath with iperf and triggers a controller
    # recomputation, reporting progress over a websocket.
    # NOTE(review): timing is driven entirely by fixed sleep() calls, so
    # the step durations below are part of the demo's choreography.

    def __init__(self):
        # Websocket used to push 'stepN'/'stop' progress markers; must be
        # injected via set_wsock() before start() is called.
        self.wsock = None

    def set_wsock(self, ws):
        # Inject the websocket used for progress notifications.
        self.wsock = ws

    def network_creation(self):
        '''
        Initializes the mininet topology, configures singlepath
        and pings src and dst to test reachability
        '''
        print 'Starting controller'
        # The controller runs as an external shell script; its stdout is
        # piped but never read here.
        self.controllerprocess = subprocess.Popen(
            "./configuration/start_controller.sh",
            stdout=subprocess.PIPE
        )
        sleep(1)
        print 'Creating network'
        self.net = Mininet(topo=DemoTopo(), link=TCLink,
                           switch=OVSKernelSwitch,
                           controller=RemoteController)
        self.net.start()
        sleep(1)
        subprocess.call('./configuration/configure_singlepath.sh', shell=True)
        print 'Testing host reachability'
        # Cache the four nodes the demo manipulates later.
        self.src = self.net.getNodeByName('src')
        self.dst = self.net.getNodeByName('dst')
        self.h1 = self.net.getNodeByName('h1')
        self.h2 = self.net.getNodeByName('h2')
        self.net.ping([self.src, self.dst])

    def start_streaming(self):
        '''
        Starts streaming the video file between src and dst
        '''
        # RTP server on src pushes VIDEO_FILE toward 10.0.0.2:5050
        # (presumably dst's address -- TODO confirm against DemoTopo).
        self.rtp_server = self.src.sendCmd(
            "vlc-wrapper -Idummy -vvv %s --repeat --mtu 1500 --sout "
            "'#rtp{dst=10.0.0.2,port=5050,mux=ts,ttl=64}'" % VIDEO_FILE,
            shell=True
        )
        sleep(1)
        # Client on dst listens on the same RTP port.
        self.rtp_client = self.dst.sendCmd(
            'vlc-wrapper --network-caching=0 rtp://@:5050')

    def start_controller(self):
        '''
        Starts the controller monitoring/path setup
        '''
        subprocess.call(
            './configuration/configure_controller_parameters.sh',
            shell=True
        )
        sleep(5)

    def congest_subpath(self):
        '''
        Congesting path until the end of the demo runtime
        '''
        # UDP iperf server on h2, 20 Mbit/s client load from h1 for 120 s
        # (10.0.0.4 is presumably h2's address -- TODO confirm).
        self.iperf_server = self.h2.popen(
            "iperf -u -s"
        )
        sleep(1)
        self.iperf_client = self.h1.sendCmd(
            "iperf -u -c 10.0.0.4 -t 120 -b 20M"
        )

    def readapt(self):
        '''
        Re-running controller computation to adapt
        '''
        subprocess.call(
            'curl http://localhost:8080/multipath/recompute_multipath',
            shell=True
        )

    def start(self):
        # Run the four demo steps on a fixed schedule, notifying the
        # websocket client before each one.
        try:
            self.network_creation()
            sleep(13)
            self.wsock.send('step1')
            print 'Step 1 - SinglePath Streaming'
            self.start_streaming()
            sleep(30)
            self.wsock.send('step2')
            print 'Step 2 - Starting Multipath controller'
            self.start_controller()
            sleep(30)
            self.wsock.send('step3')
            print 'Step 3 - Congesting Subpath!'
            self.congest_subpath()
            sleep(20)
            self.wsock.send('step4')
            sleep(5)
            print 'Step 4 - Readapting'
            self.readapt()
            sleep(20)
            # Stop
            self.wsock.send('stop')
            sleep(1)
            self.cleanup()
            self.net.stop()
        # NOTE(review): bare except is deliberate best-effort teardown here
        # (it logs the traceback and force-cleans), but it also swallows
        # SystemExit/KeyboardInterrupt -- consider `except Exception`.
        except:
            print 'Caught exception! Cleaning up...'
            traceback.print_exc()
            self.cleanup()
            subprocess.call('mn -c', shell=True)
            subprocess.call('pkill -f python', shell=True)

    def stop(self):
        # No-op: teardown is handled by cleanup()/start()'s except path.
        pass

    def cleanup(self):
        # Best-effort teardown of the external processes started above.
        # NOTE(review): nesting below was reconstructed from a
        # whitespace-mangled source -- confirm which pkill calls belong
        # inside the hasattr guards.
        print 'Demo Cleanup'
        if hasattr(self, 'iperf_server'):
            self.iperf_server.send_signal(signal.SIGINT)
            subprocess.call('pkill -f iperf', shell=True)
            subprocess.call('pkill -f ryu', shell=True)
        if hasattr(self, 'rtp_client'):
            try:
                self.rtp_client.send_signal(signal.SIGKILL)
            except:
                pass
            print 'Killing VLC'
            subprocess.call('pkill -f vlc', shell=True)
class OpenStateErrorExperimenterMsg(app_manager.RyuApp): def __init__(self, *args, **kwargs): super(OpenStateErrorExperimenterMsg, self).__init__(*args, **kwargs) if os.geteuid() != 0: exit("You need to have root privileges to run this script") # Kill Mininet os.system("sudo mn -c 2> /dev/null") print 'Starting Mininet' self.net = Mininet(topo=SingleSwitchTopo(7), switch=UserSwitch, controller=RemoteController, cleanup=True, autoSetMacs=True, listenPort=6634, autoStaticArp=True) self.net.start() self.last_error_queue = [] self.test_id = 0 @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def switch_features_handler(self, ev): datapath = ev.msg.datapath self.monitor_thread = hub.spawn( getattr(self, '_monitor%s' % self.test_id), datapath) self.test_id += 1 def add_flow(self, datapath, priority, match, actions): inst = [ ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions) ] mod = ofparser.OFPFlowMod(datapath=datapath, cookie=0, cookie_mask=0, table_id=0, command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, priority=priority, buffer_id=ofproto.OFP_NO_BUFFER, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY, flags=0, match=match, instructions=inst) datapath.send_msg(mod) def send_table_mod(self, datapath): req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=0, stateful=1) datapath.send_msg(req) def send_key_lookup(self, datapath): key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) def send_key_update(self, datapath): key_update_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_update_extractor) def test0(self, datapath): self.send_key_lookup(datapath) self.send_key_update(datapath) def 
test1(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) actions = [ofparser.OFPActionOutput(2, 0)] match = ofparser.OFPMatch(in_port=1, state=6) self.add_flow(datapath, 150, match, actions) actions = [osparser.OFPExpActionSetState(state=6, table_id=10)] match = ofparser.OFPMatch(in_port=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(1, 0)] match = ofparser.OFPMatch(in_port=2) self.add_flow(datapath, 200, match, actions) def test2(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) actions = [ofparser.OFPActionOutput(2, 0)] match = ofparser.OFPMatch(in_port=1, state=6) self.add_flow(datapath, 150, match, actions) actions = [osparser.OFPExpActionSetState(state=6, table_id=200)] match = ofparser.OFPMatch(in_port=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(1, 0)] match = ofparser.OFPMatch(in_port=2) self.add_flow(datapath, 200, match, actions) def test3(self, datapath): self.send_table_mod(datapath) # I provide zero fields => I cannot set an empty extractor! key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[], table_id=0) datapath.send_msg(key_lookup_extractor) # I provide more fields than allowed key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST, ofproto.OXM_OF_IPV4_DST, ofproto.OXM_OF_TCP_SRC, ofproto.OXM_OF_TCP_DST, ofproto.OXM_OF_UDP_SRC, ofproto.OXM_OF_UDP_DST ], table_id=0) datapath.send_msg(key_lookup_extractor) def test4(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) # I provide zero keys => I cannot access the state table with an empty key! 
state = osparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[], table_id=0) datapath.send_msg(state) # I provide more keys than allowed state = osparser.OFPExpMsgSetFlowState( datapath=datapath, state=88, keys=[ 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4, 5 ], table_id=0) datapath.send_msg(state) def test5(self, datapath): self.send_table_mod(datapath) key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) state = osparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[10, 0, 0, 5], table_id=0) datapath.send_msg(state) def test6(self, datapath): self.send_table_mod(datapath) key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) state = osparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[10, 0, 0, 5], table_id=0) datapath.send_msg(state) def test7(self, datapath): self.send_table_mod(datapath) key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], 
table_id=0) datapath.send_msg(key_update_extractor) def test8(self, datapath): self.send_table_mod(datapath) key_update_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) def test9(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = osparser.OFPExpMsgSetFlowState( datapath=datapath, state=88, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4], table_id=200) datapath.send_msg(state) def test10(self, datapath): self.send_table_mod(datapath) actions = [ofparser.OFPActionOutput(6, 0)] match = ofparser.OFPMatch(in_port=5, ip_proto=1, eth_type=0x800, global_state=2863311530) self.add_flow(datapath, 150, match, actions) msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=2863311530, global_state_mask=0xffffffff) datapath.send_msg(msg) actions = [ofparser.OFPActionOutput(5, 0)] match = ofparser.OFPMatch(in_port=6, ip_proto=1, eth_type=0x800) self.add_flow(datapath, 150, match, actions) def test11(self, datapath): self.send_table_mod(datapath) (global_state, global_state_mask) = osparser.masked_global_state_from_str( "1*1*1*1*1*1*1*1*0*0*1*1*1*1*1*1*") actions = [ofparser.OFPActionOutput(6, 0)] match = ofparser.OFPMatch( in_port=5, eth_type=0x800, ip_proto=1, global_state=osparser.masked_global_state_from_str( "1*1*1*1*1*1*1*1*0*0*1*1*1*1*1*1*")) self.add_flow(datapath, 150, match, actions) msg = osparser.OFPExpSetGlobalState( datapath=datapath, global_state=global_state, global_state_mask=global_state_mask) datapath.send_msg(msg) actions = [ofparser.OFPActionOutput(5, 0)] match = ofparser.OFPMatch(in_port=6, ip_proto=1, eth_type=0x800) 
self.add_flow(datapath, 200, match, actions) def test12(self, datapath): self.send_table_mod(datapath) actions = [ofparser.OFPActionOutput(6, 0)] match = ofparser.OFPMatch(in_port=5, ip_proto=1, eth_type=0x800, global_state=1492) self.add_flow(datapath, 200, match, actions) actions = [osparser.OFPExpActionSetGlobalState(global_state=1492)] match = ofparser.OFPMatch(in_port=5, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(5, 0)] match = ofparser.OFPMatch(in_port=6, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 200, match, actions) def test13(self, datapath): self.send_table_mod(datapath) (global_state, global_state_mask ) = osparser.masked_global_state_from_str("*1*1*1*1*0*0*1*1*1*1*1*1*") actions = [ofparser.OFPActionOutput(6, 0)] match = ofparser.OFPMatch( in_port=5, eth_type=0x800, ip_proto=1, global_state=osparser.masked_global_state_from_str( "*1*1*1*1*0*0*1*1*1*1*1*1*")) self.add_flow(datapath, 200, match, actions) actions = [ osparser.OFPExpActionSetGlobalState( global_state=global_state, global_state_mask=global_state_mask) ] match = ofparser.OFPMatch(in_port=5, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(5, 0)] match = ofparser.OFPMatch(in_port=6, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 200, match, actions) def test14(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) command = 255 data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, command) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test15(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) # dummy data payload command = 255 data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, command) exp_type = 2**32 - 1 msg = 
ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test16(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) command = osproto.OFPSC_EXP_SET_FLOW_STATE # instead of packing into '!Bx' data = struct.pack('!B', command) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test17(self, datapath): state = osparser.OFPExpMsgSetFlowState( datapath=datapath, state=88, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4], table_id=0) datapath.send_msg(state) def test18(self, datapath): state = osparser.OFPExpMsgDelFlowState( datapath=datapath, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4], table_id=0) datapath.send_msg(state) def test19(self, datapath): data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, osproto.OFPSC_EXP_SET_GLOBAL_STATE) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test20(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) act_type = 2 data = struct.pack('!I4xB', act_type, 0) a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data) actions = [a] match = ofparser.OFPMatch(in_port=5, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 100, match, actions) def test21(self, datapath): command = osproto.OFPSC_EXP_STATEFUL_TABLE_CONFIG data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, command) data += struct.pack('!B', 0) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test22(self, datapath): command = osproto.OFPSC_EXP_STATEFUL_TABLE_CONFIG data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, 
command) data += struct.pack( osproto.OFP_EXP_STATE_MOD_STATEFUL_TABLE_CONFIG_PACK_STR, 250, 1) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test23(self, datapath): self.send_table_mod(datapath) key_lookup_extractor = osparser.OFPExpMsgKeyExtract( datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=250) datapath.send_msg(key_lookup_extractor) def test24(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = osparser.OFPExpMsgDelFlowState( datapath=datapath, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4], table_id=0) datapath.send_msg(state) def test25(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = osparser.OFPExpMsgDelFlowState( datapath=datapath, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4], table_id=250) datapath.send_msg(state) def test26(self, datapath): command = osproto.OFPSC_EXP_DEL_FLOW_STATE data = struct.pack(osproto.OFP_EXP_STATE_MOD_PACK_STR, command) data += struct.pack('!B3xIBBBB', 0, 3, 0, 0, 0, 1) exp_type = osproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test27(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = osparser.OFPExpMsgDelFlowState( datapath=datapath, keys=[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 4, 0], table_id=0) datapath.send_msg(state) def test28(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) act_type = osproto.OFPAT_EXP_SET_STATE data = struct.pack('!I4xB', act_type, 0) a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data) actions = [a] match = ofparser.OFPMatch(in_port=5, 
eth_type=0x800, ip_proto=1) self.add_flow(datapath, 100, match, actions) def test29(self, datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) act_type = osproto.OFPAT_EXP_SET_GLOBAL_STATE data = struct.pack('!I4x', act_type) a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data) actions = [a] match = ofparser.OFPMatch(in_port=5, eth_type=0x800, ip_proto=1) self.add_flow(datapath, 100, match, actions) ''' To perform test #30 you have to comment lines 129-130 of ryu/ofproto/oxx_fields.py file and recompile the controller. Furthermore you have to uncomment _monitor30 in this file With this little patch the controller does not mask the match field, triggering the error at switch side. def test30(self,datapath): self.send_table_mod(datapath) actions = [] match = ofparser.OFPMatch(in_port=1,ip_proto=1,eth_type=0x800,state=(7,8)) self.add_flow(datapath, 150, match, actions) ''' def wait_for_error(self, test_num, err_type, err_code): attempts = 0 while len(self.last_error_queue) != 1 and attempts < 3: print 'Waiting %d seconds...' % (3 - attempts) attempts += 1 time.sleep(1) if len(self.last_error_queue) == 1 and self.last_error_queue[0] == ( err_type, err_code): print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num self.last_error_queue = [] else: print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num self.stop_test_and_exit() def wait_for_two_errors(self, test_num, err_type1, err_code1, err_type2, err_code2): attempts = 0 while len(self.last_error_queue) != 2 and attempts < 3: print 'Waiting %d seconds...' 
% (3 - attempts) attempts += 1 time.sleep(1) if len(self.last_error_queue) == 2 and self.last_error_queue[0] == ( err_type1, err_code1) and self.last_error_queue[1] == (err_type2, err_code2): print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num self.last_error_queue = [] else: print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num self.stop_test_and_exit() def try_ping(self, test_num, source, dest, drop_perc, wait=True): if wait: attempts = 0 while len(self.last_error_queue) == 0 and attempts < 3: print 'Waiting %d seconds...' % (3 - attempts) attempts += 1 time.sleep(1) drop_perc = self.net.ping( hosts=[self.net.hosts[source], self.net.hosts[dest]], timeout=1) if len(self.last_error_queue) == 0 and drop_perc == drop_perc: print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num else: print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num self.stop_test_and_exit() def _monitor0(self, datapath): print("Network is ready") # [TEST 0] Setting the extractor on a stateless stage should be impossible self.test0(datapath) self.wait_for_two_errors(0, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_SET_EXTRACTOR, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_SET_EXTRACTOR) self.restart_mininet() def _monitor1(self, datapath): print("Network is ready") # [TEST 1] Set state action must be performed onto a stateful stage (run-time check => no error is returned!) 
# mininet> h1 ping -c5 h2 # ping should fail, but rules are correctly installed self.test1(datapath) self.try_ping(test_num=1, source=0, dest=1, drop_perc=100) self.restart_mininet() def _monitor2(self, datapath): print("Network is ready") # [TEST 2] Set state action must be performed onto a stage with table_id less or equal than the number of pipeline's tables (install-time check) self.test2(datapath) self.wait_for_error(2, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_TABLE_ID) self.restart_mininet() def _monitor3(self, datapath): print("Network is ready") # [TEST 3] OFPExpMsgKeyExtract: I should provide a number of fields >0 and <MAX_FIELD_COUNT self.test3(datapath) self.wait_for_two_errors(3, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor4(self, datapath): print("Network is ready") # [TEST 4] OFPExpMsgSetFlowState: I should provide a key of size >0 and <MAX_KEY_LEN self.test4(datapath) self.wait_for_two_errors(4, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor5(self, datapath): print("Network is ready") # [TEST 5] OFPExpMsgSetFlowState: I should provide a key of size consistent with the number of fields of the update-scope self.test5(datapath) self.wait_for_error(5, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor6(self, datapath): print("Network is ready") # [TEST 6] OFPExpMsgDelFlowState: I should provide a key of size consistent with the number of fields of the update-scope self.test6(datapath) self.wait_for_error(6, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor7(self, datapath): print("Network is ready") # [TEST 7] OFPExpMsgKeyExtract: lookup-scope and update-scope must provide same length keys self.test7(datapath) self.wait_for_error(7, ofproto.OFPET_EXPERIMENTER, 
osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor8(self, datapath): print("Network is ready") # [TEST 8] OFPExpMsgKeyExtract: lookup-scope and update-scope must provide same length keys self.test8(datapath) self.wait_for_error(8, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor9(self, datapath): print("Network is ready") # [TEST 9] OFPExpMsgSetFlowState: must be executed onto a stage with table_id<=64 (number of pipeline's tables) self.test9(datapath) self.wait_for_error(9, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_TABLE_ID) self.restart_mininet() def _monitor10(self, datapath): print("Network is ready") # [TEST 10] exact match on global_state # mininet> h5 ping -c1 h6 self.test10(datapath) self.try_ping(test_num=10, source=4, dest=5, drop_perc=0) self.restart_mininet() def _monitor11(self, datapath): print("Network is ready") # [TEST 11] masked match on global_state # mininet> h5 ping -c1 h6 self.test11(datapath) self.try_ping(test_num=11, source=4, dest=5, drop_perc=0) self.restart_mininet() def _monitor12(self, datapath): print("Network is ready") # [TEST 12] exact match on global_state # mininet> h5 ping -c2 h6 # the first ping should fail self.test12(datapath) # TODO: if Mininet had 'count' parameter we could simplify the code by checking drop_perc=0.25 with count=2 self.try_ping(test_num=12, source=4, dest=5, drop_perc=50) self.try_ping(test_num=12, source=4, dest=5, drop_perc=0, wait=False) self.restart_mininet() def _monitor13(self, datapath): print("Network is ready") # [TEST 13] masked match on global_state # mininet> h5 ping -c5 h6 # the first ping should fail self.test12(datapath) # TODO: if Mininet had 'count' parameter we could simplify the code by checking drop_perc=0.25 with count=2 self.try_ping(test_num=13, source=4, dest=5, drop_perc=50) self.try_ping(test_num=13, source=4, dest=5, drop_perc=0, wait=False) self.restart_mininet() def _monitor14(self, datapath): print("Network is ready") # 
[TEST 14]_STATE MOD with unknown command self.test14(datapath) self.wait_for_error(14, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_STATE_MOD_BAD_COMMAND) self.restart_mininet() def _monitor15(self, datapath): print("Network is ready") # [TEST 15]_OpenState unknown experimenter message self.test15(datapath) self.wait_for_error(15, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_MESSAGE) self.restart_mininet() def _monitor16(self, datapath): print("Network is ready") # [TEST 16]_OpenState experimenter message too short self.test16(datapath) self.wait_for_error(16, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor17(self, datapath): print("Network is ready") # [TEST 17]_Set_state in a non stateful stage self.test17(datapath) self.wait_for_error(17, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_SET_FLOW_STATE) self.restart_mininet() def _monitor18(self, datapath): print("Network is ready") # [TEST 18]_Del_flow_state in a non stateful stage self.test18(datapath) self.wait_for_error(18, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_DEL_FLOW_STATE) self.restart_mininet() def _monitor19(self, datapath): print("Network is ready") # [TEST 19]_setglobalstate with invalid length self.test19(datapath) self.wait_for_error(19, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor20(self, datapath): print("Network is ready") # [TEST 20]_unknown OpenState experimenter action self.test20(datapath) self.wait_for_error(20, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_ACTION) self.restart_mininet() def _monitor21(self, datapath): print("Network is ready") # [TEST 21]_State Mod Stateful table config with invalid length self.test21(datapath) self.wait_for_error(21, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor22(self, datapath): print("Network is ready") # [TEST 22]_State Mod Stateful table config with invalid table ID self.test22(datapath) 
self.wait_for_error(22, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_TABLE_ID) self.restart_mininet() def _monitor23(self, datapath): print("Network is ready") # [TEST 23]_Set extractor with invalid table ID self.test22(datapath) self.wait_for_error(23, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_TABLE_ID) self.restart_mininet() def _monitor24(self, datapath): print("Network is ready") # [TEST 24]_Del_flow_state with empty state table self.test24(datapath) self.wait_for_error(24, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_EXP_DEL_FLOW_STATE) self.restart_mininet() def _monitor25(self, datapath): print("Network is ready") # [TEST 25]_Del_flow_state with invalid table ID self.test25(datapath) self.wait_for_error(25, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_TABLE_ID) self.restart_mininet() def _monitor26(self, datapath): print("Network is ready") # [TEST 26]_Del_flow_state with bad length self.test26(datapath) self.wait_for_error(26, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor27(self, datapath): print("Network is ready") # [TEST 27]_Del_flow_state with key not consistent with update scope self.test27(datapath) self.wait_for_error(27, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor28(self, datapath): print("Network is ready") # [TEST 28] Set state action with invalid length self.test28(datapath) self.wait_for_error(28, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) self.restart_mininet() def _monitor29(self, datapath): print("Network is ready") # [TEST 29] Set global state action with invalid length self.test29(datapath) self.wait_for_error(29, ofproto.OFPET_EXPERIMENTER, osproto.OFPEC_BAD_EXP_LEN) #self.restart_mininet() self.stop_test_and_gracefully_exit() ''' To perform the test #30 you have to comment lines 129-130 of ryu/ofproto/oxx_fields.py file and recompile the controller Furthermore you have to uncomment test30 in this file With this little patch the 
controller does not mask the match field, triggering the error at switch side. def _monitor30(self,datapath): print("Network is ready") # [TEST 30] Bad masked state match field self.test30(datapath) self.wait_for_error(30,ofproto.OFPET_EXPERIMENTER,osproto.OFPEC_BAD_MATCH_WILDCARD) #self.restart_mininet() self.stop_test_and_gracefully_exit() ''' @set_ev_cls(ofp_event.EventOFPErrorExperimenterMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER]) def exp_error_msg_handler(self, ev): msg = ev.msg if msg.experimenter == OPENSTATE_EXPERIMENTER_ID: self.last_error_queue.append((msg.type, msg.exp_type)) def stop_test_and_exit(self): # Kill Mininet and/or Ryu self.net.stop() os.system("sudo mn -c 2> /dev/null") os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") def stop_test_and_gracefully_exit(self): # Kill Mininet and/or Ryu self.net.stop() os.system("sudo mn -c 2> /dev/null") # Send SIGTERM instead of SIGKILL os.system("kill -7 $(pidof -x ryu-manager) 2> /dev/null") def restart_mininet(self): print 'Restarting Mininet\n' os.system("sudo mn -c 2> /dev/null") self.net = Mininet(topo=SingleSwitchTopo(7), switch=UserSwitch, controller=RemoteController, cleanup=True, autoSetMacs=True, listenPort=6634, autoStaticArp=True) self.net.start()
class VirtualNetwork(object):
    """Singleton facade over a Mininet network with a remote controller.

    Builds a three-switch / four-host topology on demand and offers
    mutators to add/remove switches, hosts and links at runtime.
    """
    __metaclass__ = Singleton2
    # Class-level flag: init_topo() runs its body only once per instance.
    initialized = False

    def __init__(self):
        self.net = Mininet(controller=partial(RemoteController,
                                              ip='192.168.255.7', port=6633),
                           switch=OVSSwitch)
        self.controller = self.net.addController('controller', port=6633)

    def get_net(self):
        return self.net

    def init_topo(self):
        """Create s1--(s2,s3) with h1,h2 on s2 and h3,h4 on s3, then start
        and ping-test the network. Idempotent."""
        if self.initialized:
            return
        self.initialized = True
        print("*** Creating switches")
        s1 = self.net.addSwitch('s1')
        s2 = self.net.addSwitch('s2')
        s3 = self.net.addSwitch('s3')
        self.switch = s2
        print("*** Creating hosts")
        hosts1 = [self.net.addHost('h%d' % n) for n in (1, 2)]
        hosts2 = [self.net.addHost('h%d' % n) for n in (3, 4)]
        print("*** Creating links")
        for h in hosts1:
            self.net.addLink(s2, h)
        for h in hosts2:
            self.net.addLink(s3, h)
        self.net.addLink(s1, s2)
        self.net.addLink(s1, s3)
        print("*** Starting network")
        self.net.build()
        self.controller.start()
        for sw in (s1, s2, s3):
            sw.start([self.controller])
        print("*** Testing network")
        self.net.pingAll()

    def add_switch(self, name):
        """Add a switch, attach it to the controller, and return it."""
        print("*** Add switch")
        sw = self.net.addSwitch(name)
        sw.start([self.controller])
        #self.start_net()
        return sw

    def remove_switch(self, name):
        """Stop and drop every link touching `name`, then the switch itself."""
        print("*** Remove switch")
        print(self.net.switches)
        doomed = []
        for link in self.net.links:
            # interface names look like "<node>-<port>"; splitting both ends
            # lets us test whether this link touches the doomed node
            parts = link.intf1.name.split('-')
            parts.extend(link.intf2.name.split('-'))
            print("%s, %s, %s" % (link.intf1.name, link.intf2.name, parts))
            if name in parts:
                link.stop()
                doomed.append(link)
        print("to_be_removed %s" % doomed)
        for idx, link in enumerate(doomed):
            print("index is %s, %s" % (idx, link))
            self.net.links.remove(link)
        for sw in self.net.switches:
            if sw.name == name:
                sw.stop()
        self.net.switches = [sw for sw in self.net.switches
                             if sw.name != name]
        print(self.net.switches)

    def add_host(self, name):
        """Add a host and return it."""
        print("*** Add host")
        host = self.net.addHost(name)
        #self.start_net()
        return host

    def remove_host(self, name):
        """Stop and drop every link touching `name`, then the host itself."""
        print("*** Remove host")
        print(self.net.hosts)
        doomed = []
        for link in self.net.links:
            parts = link.intf1.name.split('-')
            parts.extend(link.intf2.name.split('-'))
            print("%s, %s, %s" % (link.intf1.name, link.intf2.name, parts))
            if name in parts:
                print("remove link %s" % link)
                link.stop()
                doomed.append(link)
        print("to_be_removed %s" % doomed)
        for idx, link in enumerate(doomed):
            print("index is %s, %s" % (idx, link))
            self.net.links.remove(link)
        for host in self.net.hosts:
            if host.name == name:
                print("remove host: %s" % host.name)
                host.stop()
        print(self.net.hosts)
        self.net.hosts = [h for h in self.net.hosts if h.name != name]
        print(self.net.hosts)

    def add_link(self, node1, node2):
        print("*** Add link")
        self.net.addLink(node1, node2)
        #self.start_net()

    def remove_link(self, node1, node2, port1, port2):
        """Stop and drop the (first) link matching the given endpoints,
        in either direction."""
        print("*** Remove link")
        wanted = "%s-%s<->%s-%s" % (node1, port1, node2, port2)
        doomed = []
        for link in self.net.links:
            forward = "%s<->%s" % (link.intf1.name, link.intf2.name)
            backward = "%s<->%s" % (link.intf2.name, link.intf1.name)
            if wanted == forward or wanted == backward:
                link.stop()
                doomed.append(link)
                break
        print("to_be_removed %s" % doomed)
        for idx, link in enumerate(doomed):
            print("index is %s, %s" % (idx, link))
            self.net.links.remove(link)

    def ping_all(self):
        self.net.pingAll()

    def ping_between_hosts(self, node1, node2):
        self.net.ping([node1, node2])

    def start_net(self):
        self.net.build()
        self.net.start()
def runQueue():
    """Compare four queueing disciplines on router r0, one per interface:
    CBQ bounded (eth0), CBQ isolated (eth1), HTB+pfifo (eth2) and no
    queue (eth3), running an iperf round after configuring each.

    Every tc command string is emitted byte-for-byte as in the original
    (including the "Disicline" typo in the log banners, which is runtime
    output).
    """
    # Run Mininet
    net = Mininet(topo=NetworkTopo(), link=TCLink)
    net.start()

    # Test Ping All Host
    info("\n\n", net.ping(), "\n")
    info('\n****Start Analysis *****\n')

    r0 = net['r0']

    # ---- Queue discipline: CBQ bounded, on r0-eth0 ----
    info('\n*** Queue Disicline = CBQ [bounded] :\n')
    r0.cmdPrint('tc qdisc del dev r0-eth0 root')
    r0.cmdPrint('tc qdisc add dev r0-eth0 root handle 1: cbq rate 7Mbit avpkt 1000')
    for cid, rate in ((1, 1), (2, 3), (3, 5)):
        r0.cmdPrint('tc class add dev r0-eth0 parent 1: classid 1:%d cbq rate %dMbit avpkt 1000 bounded' % (cid, rate))
    for cid, hname in ((1, 'h1'), (2, 'h2'), (3, 'h3')):
        r0.cmdPrint('tc filter add dev r0-eth0 parent 1: protocol ip u32 match ip src ' + net[hname].IP() + ' flowid 1:%d' % cid)
    r0.cmdPrint('tc qdisc show dev r0-eth0')
    info("\n")
    # Test Iperf CBQ Bounded
    testIperf(net, 'h0', ('h1', 'h2', 'h3'))

    # ---- Queue discipline: CBQ isolated, on r0-eth1 ----
    info('\n*** Queue Disicline = CBQ [isolated] :\n')
    r0.cmdPrint('tc qdisc del dev r0-eth1 root')
    r0.cmdPrint('tc qdisc add dev r0-eth1 root handle 2: cbq rate 7Mbit avpkt 1000')
    for cid, rate in ((1, 1), (2, 3), (3, 5)):
        r0.cmdPrint('tc class add dev r0-eth1 parent 2: classid 2:%d cbq rate %dMbit avpkt 1000 isolated' % (cid, rate))
    for cid, hname in ((1, 'h0'), (2, 'h2'), (3, 'h3')):
        r0.cmdPrint('tc filter add dev r0-eth1 parent 2: protocol ip u32 match ip src ' + net[hname].IP() + ' flowid 2:%d' % cid)
    r0.cmdPrint('tc qdisc show dev r0-eth1')
    info("\n")
    # Test Iperf CBQ Isolated
    testIperf(net, 'h1', ('h0', 'h2', 'h3'))

    # ---- Queue discipline: HTB with per-class pfifo, on r0-eth2 ----
    info('\n*** Queue Disicline = HTB :\n')
    r0.cmdPrint('tc qdisc del dev r0-eth2 root')
    # add queue discipline root (trailing space kept from the original command)
    r0.cmdPrint('tc qdisc add dev r0-eth2 root handle 3:0 htb ')
    # add queue discipline classes
    for cid, rate, ceil in ((1, 5, 4), (2, 4, 2), (3, 2, 3)):
        r0.cmdPrint('tc class add dev r0-eth2 parent 3: classid 3:%d htb rate %dMbit ceil %dMbit burst 2k' % (cid, rate, ceil))
    # add pfifo queueing under each class (leading space kept from the original)
    for cid in (1, 2, 3):
        r0.cmdPrint(' tc qdisc add dev r0-eth2 parent 3:%d handle 3%d pfifo limit 10' % (cid, cid))
    # add queue discipline filters
    for cid, hname in ((1, 'h0'), (2, 'h1'), (3, 'h2')):
        r0.cmdPrint('tc filter add dev r0-eth2 parent 3: protocol ip u32 match ip src ' + net[hname].IP() + ' flowid 3:%d' % cid)
    r0.cmdPrint('tc qdisc show dev r0-eth2')
    info("\n")
    # Test Iperf HTB
    testIperf(net, 'h2', ('h0', 'h1', 'h3'))

    # ---- Queue discipline: none, on r0-eth3 ----
    info('\n*** Queue Disicline = No Queue :\n')
    r0.cmdPrint('tc qdisc del dev r0-eth3 root')
    r0.cmdPrint('tc qdisc show dev r0-eth3')
    info("\n")
    # Test Iperf No Queue
    testIperf(net, 'h3', ('h0', 'h1', 'h2'))

    # Stop Mininet
    net.stop()
def main(cli=0, ipv6=0): net = Mininet( controller = None ) # add hosts h1 = net.addHost( 'h1', ip = '172.16.101.5/24', mac = '00:04:00:00:00:02' ) h2 = net.addHost( 'h2', ip = '172.16.102.5/24', mac = '00:05:00:00:00:02' ) # add switch 1 sw1 = net.addSwitch( 'sw1', target_name = "p4dockerswitch", cls = P4DockerSwitch, config_fs = 'configs/sw1/l3_static', pcap_dump = True ) # add switch 2 sw2 = net.addSwitch( 'sw2', target_name = "p4dockerswitch", cls = P4DockerSwitch, config_fs = 'configs/sw2/l3_static', pcap_dump = True ) # add links if StrictVersion(VERSION) <= StrictVersion('2.2.0') : net.addLink( sw1, h1, port1 = 1 ) net.addLink( sw1, sw2, port1 = 2, port2 = 2 ) net.addLink( sw2, h2, port1 = 1 ) else: net.addLink( sw1, h1, port1 = 1, fast=False ) net.addLink( sw1, sw2, port1 = 2, port2 = 2, fast=False ) net.addLink( sw2, h2, port1 = 1, fast=False ) net.start() # hosts configuration - ipv4 h1.setARP( ip = '172.16.101.1', mac = '00:01:00:00:00:01' ) h2.setARP( ip = '172.16.102.1', mac = '00:02:00:00:00:01' ) h1.setDefaultRoute( 'via 172.16.101.1' ) h2.setDefaultRoute( 'via 172.16.102.1' ) if ipv6: # hosts configuration - ipv6 h1.setIP6('2ffe:0101::5', 64, 'h1-eth0') h2.setIP6('2ffe:0102::5', 64, 'h2-eth0') h1.setDefaultRoute('via 2ffe:0101::1', True) h2.setDefaultRoute('via 2ffe:0102::1', True) result = 0 if cli: CLI( net ) else: sleep(10) hosts = net.hosts print hosts # ping hosts print "ping between the hosts" result = net.ping(hosts, 30) if result != 0: print "PING FAILED BETWEEN HOSTS %s" % (hosts) else: print "PING SUCCESSFUL" if ipv6: result = net.ping6(hosts, 30) if result != 0: print "PING6 FAILED BETWEEN HOSTS %s" % (hosts) else: print "PING6 SUCCESSFUL" # print host arp table & routes for host in hosts: print "arp entries on host" print host.cmd('arp -n') print "host routes" print host.cmd('route') print "host interface list" intfList = host.intfNames() print intfList net.stop() return result
def stringBandwidthTest(host_class, controller_class, link_class, size, tdf, data_file): "Check bandwidth at various lengths along a switch chain." topo_class = StringTestTopo(size) net = Mininet(topo=topo_class, host=host_class, switch=OVSKernelSwitch, controller=controller_class, waitConnected=True, link=link_class) # no tdf_adaptor to change TDF net.start() print "*** testing basic connectivity\n" src, dst = net.hosts if tdf == 1: num_pings = 3 for i in irange(1, num_pings): ping_result = list(net.pingFull([src, dst])) # ping_result=[(host1), (host2)] # host = (src, dst, data) # data = (#sent, #received, rttmin, rttavg, rttmax, rttdev) print "Ping avg rtt = %s\n" % ping_result[0][2][3] rttavg = ping_result[0][2][3] data_file.write("RTT Avg = %s ms\n" % rttavg) else: net.ping([src, dst]) print "*** testing bandwidth\n" num_rounds = 5 client_history = [] time = 16 omit = 1 for i in irange(1, num_rounds): # bandwidth = net.iperf( [src, dst], l4Type = 'UDP', udpBw='%sM'%set_bw, format = 'm', time=20, clifile=data_file, serfile=data_file ) bandwidth = net.iperf([src, dst], l4Type='TCP', format='m', time=time, omit=omit, clifile=data_file, serfile=data_file) flush() serout = bandwidth[0] cliout = bandwidth[1] if len(serout) > 0 and len(cliout) > 0: serDataStr, unit = serout.split(" ") serData = float(serDataStr) cliDataStr, unit = cliout.split(" ") cliData = float(cliDataStr) client_history.append(cliData) data_file.write("%s\t%f\t%f\t%s\t%s\n" % (size, src.tdf, net.cpu_usage, serData, cliData)) client_mean = numpy.mean(client_history) client_stdev = numpy.std(client_history) data_file.write("Avg Throughtput = %f\n" % client_mean) data_file.write("STD Throughput = %f\n" % client_stdev) print "AVG = %f " % client_mean print "STD = %f " % client_stdev data_file.write('\n\n') net.stop() return client_mean, client_stdev
def main(cli=0): net = Mininet( controller = None ) # add hosts h1 = net.addHost( 'h1', ip = '172.16.101.5/24', mac = '00:04:00:00:00:02' ) h2 = net.addHost( 'h2', ip = '172.16.102.5/24', mac = '00:05:00:00:00:02' ) # add switch 1 sw1 = net.addSwitch( 'sw1', target_name = "p4dockerswitch", cls = P4DockerSwitch, config_fs = 'configs/sw1/l3_bgp', pcap_dump = True ) # add switch 2 sw2 = net.addSwitch( 'sw2', target_name = "p4dockerswitch", cls = P4DockerSwitch, config_fs = 'configs/sw2/l3_bgp', pcap_dump = True ) # add links if StrictVersion(VERSION) <= StrictVersion('2.2.0') : net.addLink( sw1, h1, port1 = 1 ) net.addLink( sw1, sw2, port1 = 2, port2 = 2 ) net.addLink( sw2, h2, port1 = 1 ) else: net.addLink( sw1, h1, port1 = 1, fast=False ) net.addLink( sw1, sw2, port1 = 2, port2 = 2, fast=False ) net.addLink( sw2, h2, port1 = 1, fast=False ) net.start() # configure hosts h1.setDefaultRoute( 'via 172.16.101.1' ) h2.setDefaultRoute( 'via 172.16.102.1' ) sw1.cmd( 'service quagga start') sw2.cmd( 'service quagga start') result = 0 if cli: CLI( net ) else: sleep(30) node_values = net.values() print node_values hosts = net.hosts print hosts # ping hosts print "PING BETWEEN THE HOSTS" result = net.ping(hosts,30) # print host arp table & routes for host in hosts: print "ARP ENTRIES ON HOST" print host.cmd('arp -n') print "HOST ROUTES" print host.cmd('route') print "HOST INTERFACE LIST" intfList = host.intfNames() print intfList if result != 0: print "PING FAILED BETWEEN HOSTS %s" % (hosts) else: print "PING SUCCESSFUL!!!" net.stop() return result
c0 = net.addController() s1 = net.addSwitch('s1') h0 = net.addHost('h0') h1 = net.addHost('h1') h2 = net.addHost('h2') net.addLink(h0, s1) net.addLink(h1, s1) net.addLink(h2, s1) net.start() print("Flow Rule Added") call('ovs-ofctl add-flow s1 priority=10,action=normal', shell=True) while 1: net.ping([h0,h1,h2],1) # timeout = 1 seconds sleep(2) """ #To stop the flow from host 0 with ip 10.0.0.1 print("Stop the flow from host 0 with ip 10.0.0.1") call( 'ovs-ofctl add-flow s1 priority=11,dl_type=0x0800,nw_src=10.0.0.1,action=drop', shell=True ) net.pingAll() #To restore the flo back for host 0 after quarantine print("Restore communication with the host 0") call( 'ovs-ofctl --strict del-flows s1 priority=11,dl_type=0x0800,nw_src=10.0.0.1', shell=True ) net.pingAll() """ #CLI(net)
def main(cli=0): net = Mininet( controller=None ) # add hosts h1 = net.addHost( 'h1', ip = '172.16.101.5/24', mac = '00:04:00:00:00:02' ) h2 = net.addHost( 'h2', ip = '172.16.102.5/24', mac = '00:05:00:00:00:02' ) h3 = net.addHost( 'h3', ip = '172.16.102.6/24', mac = '00:06:00:00:00:02' ) h4 = net.addHost( 'h4', ip = '172.16.102.7/24', mac = '00:07:00:00:00:02' ) # add switch sw_model_dir = '/p4factory/targets/switch/' sw1_fs_map = [] sw1_fs_map.append( [ os.getcwd() + '/' + 'configs/sw1/l3vi', '/configs' ] ) sw1 = net.addSwitch( 'sw1', cls=BmDockerSwitch, image='p4dockerswitch', fs_map=sw1_fs_map, model_dir=sw_model_dir ) # add links if StrictVersion(VERSION) <= StrictVersion('2.2.0') : net.addLink( sw1, h1, port1 = 1 ) net.addLink( sw1, h2, port1 = 2 ) net.addLink( sw1, h3, port1 = 3 ) net.addLink( sw1, h4, port1 = 4 ) else: net.addLink( sw1, h1, port1 = 1, fast=False ) net.addLink( sw1, h2, port1 = 2, fast=False ) net.addLink( sw1, h3, port1 = 3, fast=False ) net.addLink( sw1, h4, port1 = 4, fast=False ) net.start() # configure hosts h1.setDefaultRoute( 'via 172.16.101.1' ) h2.setDefaultRoute( 'via 172.16.102.1' ) h3.setDefaultRoute( 'via 172.16.102.1' ) h4.setDefaultRoute( 'via 172.16.102.1' ) result = 0 if cli: CLI( net ) else: hosts = net.hosts # ping hosts print "PING BETWEEN THE HOSTS" result = net.ping( hosts, 30 ) # print host arp table & routes for host in hosts: print "ARP ENTRIES ON HOST" print host.cmd( 'arp -n' ) print "HOST ROUTES" print host.cmd( 'route' ) print "HOST INTERFACE LIST" intfList = host.intfNames() print intfList if result != 0: print "PING FAILED BETWEEN HOSTS %s" % ( hosts ) else: print "PING SUCCESSFUL!!!" net.stop() return result
class BebaErrorExperimenterMsg(app_manager.RyuApp): def __init__(self, *args, **kwargs): super(BebaErrorExperimenterMsg, self).__init__(*args, **kwargs) if os.geteuid() != 0: exit("You need to have root privileges to run this script") # Kill Mininet os.system("sudo mn -c 2> /dev/null") print 'Starting Mininet' self.net = Mininet(topo=SingleSwitchTopo(7),switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634,autoStaticArp=True) self.net.start() self.last_error_queue = [] self.test_id = 0 @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def switch_features_handler(self, ev): datapath = ev.msg.datapath self.monitor_thread = hub.spawn(getattr(self, '_monitor%s' % self.test_id),datapath) self.test_id += 1 def add_flow(self, datapath, priority, match, actions): inst = [ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] mod = ofparser.OFPFlowMod( datapath=datapath, cookie=0, cookie_mask=0, table_id=0, command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, priority=priority, buffer_id=ofproto.OFP_NO_BUFFER, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY, flags=0, match=match, instructions=inst) datapath.send_msg(mod) def send_table_mod(self, datapath): req = bebaparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=0, stateful=1) datapath.send_msg(req) def send_key_lookup(self, datapath): key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) def send_key_update(self, datapath): key_update_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_update_extractor) def test0(self,datapath): self.send_key_lookup(datapath) self.send_key_update(datapath) def 
test1(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) actions = [ofparser.OFPActionOutput(2,0)] match = ofparser.OFPMatch(in_port=1,state=6) self.add_flow(datapath, 150, match, actions) actions = [bebaparser.OFPExpActionSetState(state=6,table_id=10)] match = ofparser.OFPMatch(in_port=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(1,0)] match = ofparser.OFPMatch(in_port=2) self.add_flow(datapath, 200, match, actions) def test2(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) actions = [ofparser.OFPActionOutput(2,0)] match = ofparser.OFPMatch(in_port=1,state=6) self.add_flow(datapath, 150, match, actions) actions = [bebaparser.OFPExpActionSetState(state=6,table_id=200)] match = ofparser.OFPMatch(in_port=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(1,0)] match = ofparser.OFPMatch(in_port=2) self.add_flow(datapath, 200, match, actions) def test3(self,datapath): self.send_table_mod(datapath) # I provide zero fields => I cannot set an empty extractor! key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[], table_id=0) datapath.send_msg(key_lookup_extractor) # I provide more fields than allowed key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST,ofproto.OXM_OF_IPV4_DST,ofproto.OXM_OF_TCP_SRC,ofproto.OXM_OF_TCP_DST,ofproto.OXM_OF_UDP_SRC,ofproto.OXM_OF_UDP_DST], table_id=0) datapath.send_msg(key_lookup_extractor) def test4(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) # I provide zero keys => I cannot access the state table with an empty key! 
state = bebaparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[], table_id=0) datapath.send_msg(state) # I provide more keys than allowed state = bebaparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[0,0,0,0,0,2,0,0,0,0,0,4,0,0,0,0,0,2,0,0,0,0,0,4,0,0,0,0,0,2,0,0,0,0,0,4,0,0,0,0,0,2,0,0,0,0,0,4,5], table_id=0) datapath.send_msg(state) def test5(self,datapath): self.send_table_mod(datapath) key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) state = bebaparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[10,0,0,5], table_id=0) datapath.send_msg(state) def test6(self,datapath): self.send_table_mod(datapath) key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) state = bebaparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[10,0,0,5], table_id=0) datapath.send_msg(state) def test7(self,datapath): self.send_table_mod(datapath) key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) key_update_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) 
datapath.send_msg(key_update_extractor) def test8(self,datapath): self.send_table_mod(datapath) key_update_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=0) datapath.send_msg(key_update_extractor) key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) datapath.send_msg(key_lookup_extractor) def test9(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = bebaparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[0,0,0,0,0,2,0,0,0,0,0,4], table_id=200) datapath.send_msg(state) def test10(self,datapath): self.send_table_mod(datapath) actions = [ofparser.OFPActionOutput(6,0)] match = ofparser.OFPMatch(in_port=5,ip_proto=1,eth_type=0x800,global_state=2863311530) self.add_flow(datapath, 150, match, actions) msg = bebaparser.OFPExpSetGlobalState(datapath=datapath, global_state=2863311530, global_state_mask=0xffffffff) datapath.send_msg(msg) actions = [ofparser.OFPActionOutput(5,0)] match = ofparser.OFPMatch(in_port=6,ip_proto=1,eth_type=0x800) self.add_flow(datapath, 150, match, actions) def test11(self,datapath): self.send_table_mod(datapath) (global_state, global_state_mask) = bebaparser.masked_global_state_from_str("1*1*1*1*1*1*1*1*0*0*1*1*1*1*1*1*") actions = [ofparser.OFPActionOutput(6,0)] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1,global_state=bebaparser.masked_global_state_from_str("1*1*1*1*1*1*1*1*0*0*1*1*1*1*1*1*")) self.add_flow(datapath, 150, match, actions) msg = bebaparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask) datapath.send_msg(msg) actions = [ofparser.OFPActionOutput(5,0)] match = ofparser.OFPMatch(in_port=6,ip_proto=1,eth_type=0x800) self.add_flow(datapath, 200, match, 
actions) def test12(self,datapath): self.send_table_mod(datapath) actions = [ofparser.OFPActionOutput(6,0)] match = ofparser.OFPMatch(in_port=5,ip_proto=1,eth_type=0x800,global_state=1492) self.add_flow(datapath, 200, match, actions) actions = [bebaparser.OFPExpActionSetGlobalState(global_state=1492)] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(5,0)] match = ofparser.OFPMatch(in_port=6,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 200, match, actions) def test13(self,datapath): self.send_table_mod(datapath) (global_state, global_state_mask) = bebaparser.masked_global_state_from_str("*1*1*1*1*0*0*1*1*1*1*1*1*") actions = [ofparser.OFPActionOutput(6,0)] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1,global_state=bebaparser.masked_global_state_from_str("*1*1*1*1*0*0*1*1*1*1*1*1*")) self.add_flow(datapath, 200, match, actions) actions = [bebaparser.OFPExpActionSetGlobalState(global_state=global_state, global_state_mask=global_state_mask)] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 100, match, actions) actions = [ofparser.OFPActionOutput(5,0)] match = ofparser.OFPMatch(in_port=6,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 200, match, actions) def test14(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) command=255 data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, command) exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test15(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) # dummy data payload command=255 data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, command) exp_type=2**32-1 msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, 
exp_type=exp_type, data=data) datapath.send_msg(msg) def test16(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) command=bebaproto.OFPSC_EXP_SET_FLOW_STATE # instead of packing into '!Bx' data=struct.pack('!B', command) exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test17(self,datapath): state = bebaparser.OFPExpMsgSetFlowState(datapath=datapath, state=88, keys=[0,0,0,0,0,2,0,0,0,0,0,4], table_id=0) datapath.send_msg(state) def test18(self,datapath): state = bebaparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[0,0,0,0,0,2,0,0,0,0,0,4], table_id=0) datapath.send_msg(state) def test19(self,datapath): data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, bebaproto.OFPSC_EXP_SET_GLOBAL_STATE) exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test20(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) act_type=10 data=struct.pack('!I4xB',act_type,0) a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data) actions = [a] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 100, match, actions) def test21(self,datapath): command=bebaproto.OFPSC_EXP_STATEFUL_TABLE_CONFIG data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, command) data+=struct.pack('!B',0) exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test22(self,datapath): command=bebaproto.OFPSC_EXP_STATEFUL_TABLE_CONFIG data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, command) data+=struct.pack(bebaproto.OFP_EXP_STATE_MOD_STATEFUL_TABLE_CONFIG_PACK_STR,250,1) 
exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test23(self,datapath): self.send_table_mod(datapath) key_lookup_extractor = bebaparser.OFPExpMsgKeyExtract(datapath=datapath, command=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofproto.OXM_OF_ETH_SRC], table_id=250) datapath.send_msg(key_lookup_extractor) def test24(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = bebaparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[0,0,0,0,0,2,0,0,0,0,0,4], table_id=0) datapath.send_msg(state) def test25(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = bebaparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[0,0,0,0,0,2,0,0,0,0,0,4], table_id=250) datapath.send_msg(state) def test26(self,datapath): command=bebaproto.OFPSC_EXP_DEL_FLOW_STATE data=struct.pack(bebaproto.OFP_EXP_STATE_MOD_PACK_STR, command) data+=struct.pack('!B3xIBBBB',0,3,0,0,0,1) exp_type=bebaproto.OFPT_EXP_STATE_MOD msg = ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data) datapath.send_msg(msg) def test27(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) state = bebaparser.OFPExpMsgDelFlowState(datapath=datapath, keys=[0,0,0,0,0,2,0,0,0,0,0,4,0], table_id=0) datapath.send_msg(state) def test28(self,datapath): self.send_table_mod(datapath) self.send_key_lookup(datapath) self.send_key_update(datapath) act_type=bebaproto.OFPAT_EXP_SET_STATE data=struct.pack('!I4xB',act_type,0) a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data) actions = [a] match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1) self.add_flow(datapath, 100, match, actions) def test29(self,datapath): self.send_table_mod(datapath) 
        # --- Tail of a test method whose "def" line precedes this chunk:
        # installs a flow carrying a hand-serialized OFPAT_EXP_SET_GLOBAL_STATE
        # experimenter action.
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        act_type=bebaproto.OFPAT_EXP_SET_GLOBAL_STATE
        # '!I4x' = big-endian uint32 action type followed by 4 padding bytes
        data=struct.pack('!I4x',act_type)
        a = ofparser.OFPActionExperimenterUnknown(experimenter=0XBEBABEBA, data=data)
        actions = [a]
        match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1)
        self.add_flow(datapath, 100, match, actions)

    def test30(self,datapath):
        """[TEST 30] Install a flow whose Beba experimenter instruction carries
        an unknown instr_type (56); the switch should reject it."""
        self.send_table_mod(datapath)
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        actions = []
        match = ofparser.OFPMatch(in_port=5,eth_type=0x800,ip_proto=1)
        i = bebaparser.OFPInstructionInSwitchPktGen(0, actions)
        i.instr_type = 56  # deliberately not a valid Beba instruction type
        inst = [i]
        mod = ofparser.OFPFlowMod(datapath=datapath, table_id=0, priority=100, match=match, instructions=inst)
        datapath.send_msg(mod)

    def test31(self,datapath):
        """[TEST 31] Send a PKTTMP_MOD message with an unknown command."""
        from scapy.all import Ether, ARP
        self.send_table_mod(datapath)
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        pkt_data = str(Ether(src='00:01:02:03:04:05', dst='46:9c:96:30:ff:d5')/ARP(hwsrc='00:01:02:03:04:05',hwdst='46:9c:96:30:ff:d5',psrc="172.16.0.2",pdst='172.16.0.1',op=2))
        command=50  # not one of the valid OFPSC_* PKTTMP commands
        data=struct.pack(bebaproto.OFP_EXP_PKTTMP_MOD_PACK_STR, command)
        data+=struct.pack(bebaproto.OFP_EXP_PKTTMP_MOD_ADD_PKTTMP_PACK_STR, 0)
        data+=pkt_data
        exp_type=bebaproto.OFPT_EXP_PKTTMP_MOD
        req =ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data)
        datapath.send_msg(req)

    def test32(self,datapath):
        """[TEST 32] Send a PKTTMP_MOD message that is too short (payload is a
        single command byte, nothing else)."""
        from scapy.all import Ether, ARP
        self.send_table_mod(datapath)
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        exp_type=bebaproto.OFPT_EXP_PKTTMP_MOD
        data=struct.pack('!B',bebaproto.OFPSC_ADD_PKTTMP)
        req =ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data)
        datapath.send_msg(req)

    def test33(self,datapath):
        """[TEST 33] ADD_PKTTMP command that is too short: the packet template
        (pkt_data) is built but deliberately never appended to the payload."""
        from scapy.all import Ether, ARP
        self.send_table_mod(datapath)
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        pkt_data = str(Ether(src='00:01:02:03:04:05', dst='46:9c:96:30:ff:d5')/ARP(hwsrc='00:01:02:03:04:05',hwdst='46:9c:96:30:ff:d5',psrc="172.16.0.2",pdst='172.16.0.1',op=2))
        command=bebaproto.OFPSC_ADD_PKTTMP
        data=struct.pack(bebaproto.OFP_EXP_PKTTMP_MOD_PACK_STR, command)
        exp_type=bebaproto.OFPT_EXP_PKTTMP_MOD
        req =ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data)
        datapath.send_msg(req)

    def test34(self,datapath):
        """[TEST 34] DEL_PKTTMP command that is too short (same construction as
        test33, different command)."""
        from scapy.all import Ether, ARP
        self.send_table_mod(datapath)
        self.send_key_lookup(datapath)
        self.send_key_update(datapath)
        pkt_data = str(Ether(src='00:01:02:03:04:05', dst='46:9c:96:30:ff:d5')/ARP(hwsrc='00:01:02:03:04:05',hwdst='46:9c:96:30:ff:d5',psrc="172.16.0.2",pdst='172.16.0.1',op=2))
        command=bebaproto.OFPSC_DEL_PKTTMP
        data=struct.pack(bebaproto.OFP_EXP_PKTTMP_MOD_PACK_STR, command)
        exp_type=bebaproto.OFPT_EXP_PKTTMP_MOD
        req =ofparser.OFPExperimenter(datapath=datapath, experimenter=0xBEBABEBA, exp_type=exp_type, data=data)
        datapath.send_msg(req)

    '''
    To perform test #35 you have to comment lines 129-130 of ryu/ofproto/oxx_fields.py file and recompile the controller.
    Furthermore you have to uncomment _monitor35 in this file
    With this little patch the controller does not mask the match field, triggering the error at switch side.

    def test35(self,datapath):
        self.send_table_mod(datapath)
        actions = []
        match = ofparser.OFPMatch(in_port=1,ip_proto=1,eth_type=0x800,state=(7,8))
        self.add_flow(datapath, 150, match, actions)
    '''

    def wait_for_error(self,test_num,err_type,err_code):
        """Poll up to ~3 s for exactly one queued switch error equal to
        (err_type, err_code); print SUCCESS and clear the queue on match,
        otherwise print FAIL and tear the whole test run down."""
        attempts = 0
        while len(self.last_error_queue)!=1 and attempts<3:
            print 'Waiting %d seconds...' % (3-attempts)
            attempts += 1
            time.sleep(1)
        if len(self.last_error_queue)==1 and self.last_error_queue[0]==(err_type,err_code):
            print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num
            self.last_error_queue = []
        else:
            print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num
            self.stop_test_and_exit()

    def wait_for_two_errors(self,test_num,err_type1,err_code1,err_type2,err_code2):
        """Same as wait_for_error but expects exactly two queued errors, in
        order: (err_type1, err_code1) then (err_type2, err_code2)."""
        attempts = 0
        while len(self.last_error_queue)!=2 and attempts<3:
            print 'Waiting %d seconds...' % (3-attempts)
            attempts += 1
            time.sleep(1)
        if len(self.last_error_queue)==2 and self.last_error_queue[0]==(err_type1,err_code1) and self.last_error_queue[1]==(err_type2,err_code2):
            print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num
            self.last_error_queue = []
        else:
            print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num
            self.stop_test_and_exit()

    def try_ping(self,test_num,source,dest,drop_perc,wait=True):
        """Ping host[source] -> host[dest] and declare SUCCESS when no switch
        error was queued.

        NOTE(review): the measured ping result overwrites the `drop_perc`
        parameter, so the subsequent `drop_perc == drop_perc` comparison is
        always true — the expected drop percentage passed by the callers
        (0/50/100) is never actually checked. Only the error queue decides
        the outcome. Confirm whether this is intentional before fixing.
        """
        if wait:
            attempts = 0
            while len(self.last_error_queue)==0 and attempts<3:
                print 'Waiting %d seconds...' % (3-attempts)
                attempts += 1
                time.sleep(1)
        drop_perc = self.net.ping(hosts=[self.net.hosts[source],self.net.hosts[dest]],timeout=1)
        if len(self.last_error_queue)==0 and drop_perc == drop_perc:
            print 'Test %d: \x1b[32mSUCCESS!\x1b[0m' % test_num
        else:
            print 'Test %d: \x1b[31mFAIL\x1b[0m' % test_num
            self.stop_test_and_exit()

    # --- One _monitorN driver per test case: fire testN, assert the expected
    # switch reaction (error codes and/or ping result), then restart Mininet
    # for the next case. ---

    def _monitor0(self,datapath):
        print("Network is ready")
        # [TEST 0] Setting the extractor on a stateless stage should be impossible
        self.test0(datapath)
        self.wait_for_two_errors(0,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_SET_EXTRACTOR,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_SET_EXTRACTOR)
        self.restart_mininet()

    def _monitor1(self,datapath):
        print("Network is ready")
        # [TEST 1] Set state action must be performed onto a stateful stage (run-time check => no error is returned!)
        # mininet> h1 ping -c5 h2
        # ping should fail, but rules are correctly installed
        self.test1(datapath)
        self.try_ping(test_num=1,source=0,dest=1,drop_perc=100)
        self.restart_mininet()

    def _monitor2(self,datapath):
        print("Network is ready")
        # [TEST 2] Set state action must be performed onto a stage with table_id less or equal than the number of pipeline's tables (install-time check)
        self.test2(datapath)
        self.wait_for_error(2,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_TABLE_ID)
        self.restart_mininet()

    def _monitor3(self,datapath):
        print("Network is ready")
        # [TEST 3] OFPExpMsgKeyExtract: I should provide a number of fields >0 and <MAX_FIELD_COUNT
        self.test3(datapath)
        self.wait_for_two_errors(3,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor4(self,datapath):
        print("Network is ready")
        # [TEST 4] OFPExpMsgSetFlowState: I should provide a key of size >0 and <MAX_KEY_LEN
        self.test4(datapath)
        self.wait_for_two_errors(4,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor5(self,datapath):
        print("Network is ready")
        # [TEST 5] OFPExpMsgSetFlowState: I should provide a key of size consistent with the number of fields of the update-scope
        self.test5(datapath)
        self.wait_for_error(5,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor6(self,datapath):
        print("Network is ready")
        # [TEST 6] OFPExpMsgDelFlowState: I should provide a key of size consistent with the number of fields of the update-scope
        self.test6(datapath)
        self.wait_for_error(6,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor7(self,datapath):
        print("Network is ready")
        # [TEST 7] OFPExpMsgKeyExtract: lookup-scope and update-scope must provide same length keys
        self.test7(datapath)
        self.wait_for_error(7,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor8(self,datapath):
        print("Network is ready")
        # [TEST 8] OFPExpMsgKeyExtract: lookup-scope and update-scope must provide same length keys
        self.test8(datapath)
        self.wait_for_error(8,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor9(self,datapath):
        print("Network is ready")
        # [TEST 9] OFPExpMsgSetFlowState: must be executed onto a stage with table_id<=64 (number of pipeline's tables)
        self.test9(datapath)
        self.wait_for_error(9,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_TABLE_ID)
        self.restart_mininet()

    def _monitor10(self,datapath):
        print("Network is ready")
        # [TEST 10] exact match on global_state
        # mininet> h5 ping -c1 h6
        self.test10(datapath)
        self.try_ping(test_num=10,source=4,dest=5,drop_perc=0)
        self.restart_mininet()

    def _monitor11(self,datapath):
        print("Network is ready")
        # [TEST 11] masked match on global_state
        # mininet> h5 ping -c1 h6
        self.test11(datapath)
        self.try_ping(test_num=11,source=4,dest=5,drop_perc=0)
        self.restart_mininet()

    def _monitor12(self,datapath):
        print("Network is ready")
        # [TEST 12] exact match on global_state
        # mininet> h5 ping -c2 h6
        # the first ping should fail
        self.test12(datapath)
        # TODO: if Mininet had 'count' parameter we could simplify the code by checking drop_perc=0.25 with count=2
        self.try_ping(test_num=12,source=4,dest=5,drop_perc=50)
        self.try_ping(test_num=12,source=4,dest=5,drop_perc=0,wait=False)
        self.restart_mininet()

    def _monitor13(self,datapath):
        print("Network is ready")
        # [TEST 13] masked match on global_state
        # mininet> h5 ping -c5 h6
        # the first ping should fail
        # NOTE(review): this calls test12, not test13 — looks like a copy-paste
        # slip; confirm whether a masked-match test13 was intended.
        self.test12(datapath)
        # TODO: if Mininet had 'count' parameter we could simplify the code by checking drop_perc=0.25 with count=2
        self.try_ping(test_num=13,source=4,dest=5,drop_perc=50)
        self.try_ping(test_num=13,source=4,dest=5,drop_perc=0,wait=False)
        self.restart_mininet()

    def _monitor14(self,datapath):
        print("Network is ready")
        # [TEST 14] STATE MOD with unknown command
        self.test14(datapath)
        self.wait_for_error(14,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_STATE_MOD_BAD_COMMAND)
        self.restart_mininet()

    def _monitor15(self,datapath):
        print("Network is ready")
        # [TEST 15] Beba unknown experimenter message
        self.test15(datapath)
        self.wait_for_error(15,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_MESSAGE)
        self.restart_mininet()

    def _monitor16(self,datapath):
        print("Network is ready")
        # [TEST 16] STATE MOD experimenter message too short
        self.test16(datapath)
        self.wait_for_error(16,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor17(self,datapath):
        print("Network is ready")
        # [TEST 17] Set_state in a non stateful stage
        self.test17(datapath)
        self.wait_for_error(17,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_SET_FLOW_STATE)
        self.restart_mininet()

    def _monitor18(self,datapath):
        print("Network is ready")
        # [TEST 18] Del_flow_state in a non stateful stage
        self.test18(datapath)
        self.wait_for_error(18,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_DEL_FLOW_STATE)
        self.restart_mininet()

    def _monitor19(self,datapath):
        print("Network is ready")
        # [TEST 19] setglobalstate with invalid length
        self.test19(datapath)
        self.wait_for_error(19,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor20(self,datapath):
        print("Network is ready")
        # [TEST 20] unknown Beba experimenter action
        self.test20(datapath)
        self.wait_for_error(20,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_ACTION)
        self.restart_mininet()

    def _monitor21(self,datapath):
        print("Network is ready")
        # [TEST 21] State Mod Stateful table config with invalid length
        self.test21(datapath)
        self.wait_for_error(21,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor22(self,datapath):
        print("Network is ready")
        # [TEST 22] State Mod Stateful table config with invalid table ID
        self.test22(datapath)
        self.wait_for_error(22,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_TABLE_ID)
        self.restart_mininet()

    def _monitor23(self,datapath):
        print("Network is ready")
        # [TEST 23] Set extractor with invalid table ID
        # NOTE(review): this calls test22, not test23 — the comment promises a
        # set-extractor test but the STATE-MOD table-config test is re-run.
        # Both yield OFPEC_BAD_TABLE_ID, so the mix-up is masked; confirm.
        self.test22(datapath)
        self.wait_for_error(23,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_TABLE_ID)
        self.restart_mininet()

    def _monitor24(self,datapath):
        print("Network is ready")
        # [TEST 24] Del_flow_state with empty state table
        self.test24(datapath)
        self.wait_for_error(24,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_DEL_FLOW_STATE)
        self.restart_mininet()

    def _monitor25(self,datapath):
        print("Network is ready")
        # [TEST 25] Del_flow_state with invalid table ID
        self.test25(datapath)
        self.wait_for_error(25,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_TABLE_ID)
        self.restart_mininet()

    def _monitor26(self,datapath):
        print("Network is ready")
        # [TEST 26] Del_flow_state with bad length
        self.test26(datapath)
        self.wait_for_error(26,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor27(self,datapath):
        print("Network is ready")
        # [TEST 27] Del_flow_state with key not consistent with update scope
        self.test27(datapath)
        self.wait_for_error(27,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor28(self,datapath):
        print("Network is ready")
        # [TEST 28] Set state action with invalid length
        self.test28(datapath)
        self.wait_for_error(28,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor29(self,datapath):
        print("Network is ready")
        # [TEST 29] Set global state action with invalid length
        self.test29(datapath)
        self.wait_for_error(29,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor30(self,datapath):
        print("Network is ready")
        # [TEST 30] unknown Beba experimenter instruction
        self.test30(datapath)
        self.wait_for_error(30,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_INSTRUCTION)
        self.restart_mininet()

    def _monitor31(self,datapath):
        print("Network is ready")
        # [TEST 31] PKTTMP MOD with unknown command
        self.test31(datapath)
        self.wait_for_error(31,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_EXP_PKTTMP_MOD_BAD_COMMAND)
        self.restart_mininet()

    def _monitor32(self,datapath):
        print("Network is ready")
        # [TEST 32] PKTTMP MOD with too short
        self.test32(datapath)
        self.wait_for_error(32,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor33(self,datapath):
        print("Network is ready")
        # [TEST 33] ADD_PKTTMP command too short
        self.test33(datapath)
        self.wait_for_error(33,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        self.restart_mininet()

    def _monitor34(self,datapath):
        print("Network is ready")
        # [TEST 34] DEL_PKTTMP command too short
        # Last test in the chain: exit gracefully instead of restarting.
        self.test34(datapath)
        self.wait_for_error(34,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_EXP_LEN)
        #self.restart_mininet()
        self.stop_test_and_gracefully_exit()

    '''
    To perform the test #35 you have to comment lines 129-130 of ryu/ofproto/oxx_fields.py file and recompile the controller
    Furthermore you have to uncomment test35 in this file
    With this little patch the controller does not mask the match field, triggering the error at switch side.

    def _monitor35(self,datapath):
        print("Network is ready")
        # [TEST 35] Bad masked state match field
        self.test35(datapath)
        self.wait_for_error(35,ofproto.OFPET_EXPERIMENTER,bebaproto.OFPEC_BAD_MATCH_WILDCARD)
        #self.restart_mininet()
        self.stop_test_and_gracefully_exit()
    '''

    @set_ev_cls(ofp_event.EventOFPErrorExperimenterMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
    def exp_error_msg_handler(self, ev):
        """Ryu handler: queue every Beba experimenter error so the
        wait_for_* helpers above can assert on it."""
        msg = ev.msg
        if msg.experimenter == BEBA_EXPERIMENTER_ID:
            self.last_error_queue.append((msg.type,msg.exp_type))

    def stop_test_and_exit(self):
        # Kill Mininet and/or Ryu
        self.net.stop()
        os.system("sudo mn -c 2> /dev/null")
        os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null")

    def stop_test_and_gracefully_exit(self):
        # Kill Mininet and/or Ryu
        self.net.stop()
        os.system("sudo mn -c 2> /dev/null")
        # Send SIGTERM instead of SIGKILL
        # NOTE(review): on Linux "kill -7" sends SIGBUS, not SIGTERM (which is
        # 15) — this is not the graceful shutdown the comment claims. Confirm
        # and consider "kill -TERM".
        os.system("kill -7 $(pidof -x ryu-manager) 2> /dev/null")

    def restart_mininet(self):
        """Tear the current Mininet down (mn -c) and bring up a fresh
        7-host single-switch topology for the next test case."""
        print 'Restarting Mininet\n'
        os.system("sudo mn -c 2> /dev/null")
        self.net = Mininet(topo=SingleSwitchTopo(7),switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634,autoStaticArp=True)
        self.net.start()
class GenericMiddleBoxTopology(object):
    """
    This class creates a basic experiment topology containing traffic
    sources and targets as well as intermediate middlebox machines.
    The number of hosts per type can be defined in the constructor.

    Management network: 10.0.0.0/8
    User data network: 20.0.0.0/8

    client1 --                -- mb1 --                -- target1
              |               |       |               |
               -- s1 -- s3 --          -- s2 --
              |               |       |               |
    client2 --                -- mb2 --                -- target2
    """

    def __init__(self, source_instances=2, target_instances=1,
                 mbox_instances=2):
        """Build (but do not start) the network.

        source_instances -- number of traffic-source hosts
        target_instances -- number of traffic-target hosts
        mbox_instances   -- number of middlebox hosts
        """
        self.source_instances = source_instances
        self.target_instances = target_instances
        self.mbox_instances = mbox_instances
        # bring up Mininet (CPU-limited hosts, shaped TC links)
        self.net = Mininet(
            host=CPULimitedHost, link=TCLink, autoSetMacs=True)
        # topology elements
        self.controllers = []
        self.switches = []
        self.middlebox_hosts = []
        self.source_hosts = []
        self.target_hosts = []
        # single pointers to nw components
        self.control_switch = None
        self.source_switch = None
        self.target_switch = None
        self.default_controller = None
        self.data_controller = None
        # do network setup
        self.setup_controllers()
        self.setup_switches()
        self.setup_hosts()

    def start_network(self):
        """Build the network, start controllers/switches, then launch the
        middlebox, target and (after a grace period) source workloads."""
        # run the network
        self.net.staticArp()
        #self.net.start()
        # build network
        self.net.build()
        # start controllers
        self.data_controller.start()
        self.default_controller.start()
        # start switches and assign controller
        # (management switch uses the default controller; the two data-plane
        # switches are driven by the remote data controller)
        self.control_switch.start([self.default_controller])
        self.source_switch.start([self.data_controller])
        self.target_switch.start([self.data_controller])
        # additional start tasks
        self.config_middlebox_hosts()
        self.run_middlebox_hosts()
        self.run_target_hosts()
        time.sleep(5)  # give servers some time to come up
        self.run_source_hosts()

    def test_network(self):
        """Dump connections and, in non-CLI runs, sanity-check middlebox
        reachability (expects <10% ping loss)."""
        # debugging
        print "### Dumping host connections"
        dumpNodeConnections(self.net.hosts)
        if PARAMS.duration < 0:  # only test if we are in CLI mode (save time)
            print "### Testing middlebox replica connectivity"
            if self.net.ping(hosts=self.middlebox_hosts) < 0.1:
                print "### OK"

    def enter_cli(self):
        """Enter the interactive Mininet CLI; stop the net when it exits."""
        # enter user interface
        CLI(self.net)
        self.net.stop()

    def setup_controllers(self):
        """Create the local management controller and the remote data-plane
        controller."""
        # default controller for management switch
        c1 = self.net.addController("c1", port=6634)
        self.default_controller = c1
        self.controllers.append(c1)
        # custom controller for user traffic management
        c2 = self.net.addController(
            "c2", controller=RemoteController, ip='127.0.0.1', port=6633)
        self.data_controller = c2
        self.controllers.append(c2)

    def setup_switches(self):
        """Create the management (s1), source-side (s2) and target-side (s3)
        switches."""
        # management switch
        s = self.net.addSwitch("s1")
        self.switches.append(s)
        self.control_switch = s
        # source switch
        s = self.net.addSwitch("s2")
        self.switches.append(s)
        self.source_switch = s
        # target switch
        s = self.net.addSwitch("s3")
        self.switches.append(s)
        self.target_switch = s

    def setup_hosts(self):
        """
        basic host setup
        """
        global PARAMS
        # middlebox hosts
        for i in range(0, self.mbox_instances):
            # we assume a max of 16 MBs in all experiments
            # all these MBs together use max 50% of CPU
            mb = self.net.addHost(
                "mb%d" % (i + 1),
                cpu=float(PARAMS.cpumb)/float(PARAMS.maxnumbermb))
            self.middlebox_hosts.append(mb)
            # management plane links (optionally delayed via --controldelay)
            cdl = int(PARAMS.controldelay)
            if cdl > 0:
                self.net.addLink(
                    mb, self.control_switch, delay="%dms" % cdl, bw=1000)
            else:
                self.net.addLink(mb, self.control_switch, bw=1000)
            # data plane links
            self.net.addLink(mb, self.source_switch, bw=1000)
            self.net.addLink(mb, self.target_switch, bw=1000)
        # source hosts (user-data subnet 20.0.0.0/24)
        for i in range(0, self.source_instances):
            sh = self.net.addHost(
                "source%d" % (i + 1), ip="20.0.0.%d" % (i + 1),
                cpu=float(PARAMS.cpusource))
            self.source_hosts.append(sh)
            self.net.addLink(sh, self.source_switch, bw=1000)
        # target hosts (user-data subnet 20.0.1.0/24)
        for i in range(0, self.target_instances):
            th = self.net.addHost(
                "target%d" % (i + 1), ip="20.0.1.%d" % (i + 1),
                cpu=float(PARAMS.cputarget))
            self.target_hosts.append(th)
            self.net.addLink(th, self.target_switch, bw=1000)

    def config_middlebox_hosts(self):
        """
        additional runtime configurations of MB hosts
        """
        for mb in self.middlebox_hosts:
            config_bridge(mb)

    def run_middlebox_hosts(self):
        """
        run NF functionality inside MB hosts
        (no-op in this generic base topology)
        """
        pass

    def run_target_hosts(self):
        """
        run server functionality in target hosts:
        one iperf server per (target, source) pair, on consecutive ports
        starting at USER_BASE_PORT
        """
        for th in self.target_hosts:
            # start target.py on each target machine
            p = USER_BASE_PORT
            for sh in self.source_hosts:
                # start target.py for each source host once on each target host
                #th.cmd("./target.py %d > log/target_%s_%s.log 2>&1 &"
                #       % (p, th.name, sh.name))  # one port for each source
                th.cmd("iperf -s -p %d > log/target_%s_%s.log 2>&1 &"
                       % (p, th.name, sh.name))  # one port for each source
                p += 1

    def run_source_hosts(self):
        """
        run client functionality in source hosts:
        each source runs a 120 s iperf client against the FIRST target host,
        each on its own port
        """
        th = self.target_hosts[0]
        p = USER_BASE_PORT
        for sh in self.source_hosts:
            # start source.py on source hosts and connect to first target host
            #sh.cmd("./source.py %s %d %s > log/source_%s.log 2>&1 &"
            #       % (th.IP(), p, PARAMS.srclambda, sh.name))
            sh.cmd("iperf -c %s -p %d -t 120 -i 10 > log/source_%s.log 2>&1 &"
                   % (th.IP(), p, sh.name))
            #print("./source.py %s %d %s > log/source_%s.log 2>&1 &"
            #      % (th.IP(), p, PARAMS.srclambda, sh.name))
            p += 1

    def stop_topo(self):
        """
        Do cleanup tasks. (No-op here; Mininet teardown happens in enter_cli.)
        """
        pass
# sleep(10) # CLI(net) # print "Dumping host connections" # dumpNodeConnections(net.hosts) # print "Testing network connectivity" # net.pingAll() h1 = net.get("h1") print h1 h4 = net.get("h4") print h4 net.ping([h1, h4]) # net.iperf((h1, h4)) # h1 = net.get('h1') result = h1.cmd("ifconfig") print result print net.hosts # net.ping(hosts=h1,h4) # net.stop()
class MininetWrapper(object):
    """Robot-Framework-facing wrapper around a Mininet network built from a
    topology string such as "h1:s1,s1:s2,s2:h2".

    self.topology is a square adjacency matrix over the SWITCHES only
    (hosts are excluded): 1 = link up, -1 = link down/absent; the diagonal
    cell [i][i] is set to -1 when switch i+1 has been stopped.
    """

    def __init__(self):
        self.mininet_client = None   # Mininet instance, created by run_mininet
        self.topology = []           # switch adjacency matrix (see class doc)
        self.delay = None            # optional per-link delay string, e.g. '5ms'

    def set_delay(self, delay):
        # store as a tc-style delay string, e.g. 5 -> '5ms'
        delay = str(int(delay)) + 'ms'
        self.delay = delay

    def run_mininet(self, topology_string):
        """ Create and run multiple link network """
        self.topo_client = Topo()
        hosts = set()
        switches = set()
        # Build the switch adjacency matrix: strip the 's' markers so "s1:s2"
        # becomes "1:2", and skip any pair involving a host ('h').
        relations = re.sub(r's', '', topology_string)
        relations = [i.split(':') for i in relations.split(',')
                     if 'h' not in i]
        relations = [[int(y) - 1 for y in x] for x in relations]
        builtin.log(relations, 'DEBUG')
        verticles_count = len(set(list(itertools.chain(*relations))))
        builtin.log(self.topology, 'DEBUG')
        # initialize an all -1 (no link) matrix
        for i in xrange(verticles_count):
            temp = []
            for j in xrange(verticles_count):
                temp.append(-1)
            self.topology.append(temp[:])
        builtin.log(self.topology, 'DEBUG')
        for i in relations:
            self.topology[i[0]][i[1]] = 1
            self.topology[i[1]][i[0]] = 1
        builtin.log(self.topology, 'DEBUG')
        # Create Mininet nodes and links from the raw pair list (hosts too).
        for v1, v2 in [x.split(':') for x in str(topology_string).split(',')]:
            if 'h' in v1 and v1 not in hosts:
                self.topo_client.addHost(v1)
                hosts.add(v1)
            if 'h' in v2 and v2 not in hosts:
                self.topo_client.addHost(v2)
                hosts.add(v2)
            if 's' in v1 and v1 not in switches:
                self.topo_client.addSwitch(v1)
                switches.add(v1)
            if 's' in v2 and v2 not in switches:
                self.topo_client.addSwitch(v2)
                switches.add(v2)
            if self.delay:
                self.topo_client.addLink(v1, v2, delay=self.delay)
            else:
                self.topo_client.addLink(v1, v2)
        self.mininet_client = Mininet(switch=user_switch,
                                      controller=remote_controller,
                                      topo=self.topo_client, link=TCLink)
        self.mininet_client.start()
        builtin.log('Links info:')
        for link in self.topo_client.links(withKeys=True, withInfo=True):
            builtin.log(link)
        # self.mininet_client.waitConnected(timeout=20)
        sleep(20)

    def stop_mininet(self):
        """Stop the network (if any), reset wrapper state and run Mininet's
        global cleanup."""
        if self.mininet_client is not None:
            self.mininet_client.stop()
        if self.topology:
            self.topology = []
        self.delay = None
        cleanup()
        sleep(20)

    def kill_link(self, host1, host2):
        """Bring the link host1-host2 down; mirror switch-switch links into
        the adjacency matrix."""
        host1, host2 = str(host1), str(host2)
        self.mininet_client.configLinkStatus(host1, host2, 'down')
        if 'h' not in host1 and 'h' not in host2:
            num_1 = int(host1[1:]) - 1
            num_2 = int(host2[1:]) - 1
            self.topology[num_1][num_2] = -1
            self.topology[num_2][num_1] = -1
            builtin.log(self.topology, 'DEBUG')
        builtin.log('Down link {0} - {1}'.format(host1, host2), 'DEBUG')

    def check_link(self, host1, host2):
        """True if Mininet reports a connection from node host1 to host2.

        NOTE(review): connectionsTo is given the *name string* host2 rather
        than a node object — verify against the Mininet API that this matches
        anything."""
        switch = self.mininet_client.getNodeByName(host1)
        connections = switch.connectionsTo(host2)
        if connections:
            return True
        else:
            return False

    def up_link(self, host1, host2):
        """Bring the link host1-host2 back up; mirror switch-switch links
        into the adjacency matrix."""
        host1, host2 = str(host1), str(host2)
        self.mininet_client.configLinkStatus(host1, host2, 'up')
        if 'h' not in host1 and 'h' not in host2:
            num_1 = int(host1[1:]) - 1
            num_2 = int(host2[1:]) - 1
            self.topology[num_1][num_2] = 1
            self.topology[num_2][num_1] = 1
            builtin.log(self.topology, 'DEBUG')
        builtin.log('Up link {0} - {1}'.format(host1, host2), 'DEBUG')

    def stop_node(self, name):
        """Stop a node and mark its diagonal matrix cell as dead."""
        node = self.mininet_client.getNodeByName(name)
        node.stop()
        num_node = int(name[1:]) - 1
        self.topology[num_node][num_node] = -1
        builtin.log('Node {0} was stoped'.format(name), 'DEBUG')

    def check_connected_node(self, name):
        # returns the switch's controller-connection status
        switch = self.mininet_client.getNodeByName(name)
        return switch.connected()

    # NOTE(msenin) unstable method - after stoping mininet cant start node
    # mininet doesn't return exception
    def start_node(self, name):
        node = self.mininet_client.getNodeByName(name)
        # TODO (msenin) add option controller_name
        controllers = self.mininet_client.controllers
        builtin.log('Controllers: {0}'.format(controllers), 'DEBUG')
        node.start([controllers[0]])

    def check_rules(self):
        """Dump OpenFlow 1.3 flows from every switch and return a list of
        {switch_name: extracted_fields} dicts, keeping only cookie/priority/
        dl_src/dl_dst fields matched by the regex."""
        switches = self.mininet_client.switches
        results = []
        regex = (r'(cookie=[\w\d]+),|(dl_dst=[\w\d:\/]{35})'
                 '|(priority=[\d]+),|(dl_src=[\w\d:\/]{17})')
        for switch in switches:
            ans = switch.dpctl('dump-flows -O OpenFlow13')
            builtin.log(
                'Rules on the switch {0}: {1}'.format(switch.name, ans),
                'DEBUG')
            ans_with_regex = ""
            for m in re.finditer(regex, ans):
                for i in xrange(1, 5):
                    if m.group(i):
                        ans_with_regex = ans_with_regex + ', ' + m.group(i)
            # NOTE(review): this log line prints the raw dump `ans`, not the
            # filtered `ans_with_regex` its message advertises — confirm.
            builtin.log(
                'Rules with regex {0}: {1}'.format(switch.name, ans),
                'DEBUG')
            results.append({switch.name: ans_with_regex})
        return results

    def compare_dumped_flows(self, rules1, rules2):
        # Plain string comparison of the two dumps (despite the log text,
        # nothing volatile is stripped here).
        rules_1 = str(rules1)
        rules_2 = str(rules2)
        builtin.log('Compare two flow tables(without changing parts): ',
                    'DEBUG')
        builtin.log(rules_1, 'DEBUG')
        builtin.log(rules_2, 'DEBUG')
        if rules_1 != rules_2:
            return False
        return True

    def ping(self, name1, name2):
        """Ping name1 <-> name2; logs ifconfig and manual pings (assuming the
        standard 10.0.0.x Mininet addressing) and returns Mininet's packet
        drop percentage as an int (0 == full connectivity)."""
        node1 = self.mininet_client.getNodeByName(name1)
        node2 = self.mininet_client.getNodeByName(name2)
        ping = self.mininet_client.ping(hosts=[node1, node2], timeout=10)
        num1, num2 = name1[1:], name2[1:]
        cmd1 = node1.cmd('ifconfig')
        builtin.log('{0}'.format(cmd1), 'DEBUG')
        cmd1 = node1.cmd('ping -d -c 5 -w 5 10.0.0.' + num2)
        builtin.log('{0}'.format(cmd1), 'DEBUG')
        cmd2 = node2.cmd('ifconfig')
        builtin.log('{0}'.format(cmd2), 'DEBUG')
        cmd1 = node2.cmd('ping -d -c 5 -w 5 10.0.0.' + num1)
        builtin.log('{0}'.format(cmd1), 'DEBUG')
        return int(ping)

    def check_route_state(self, route):
        # TODO (msenin) delete method after tests refactoring
        """Check the state of route
        :param route: list with verticles (each verticle is switch id)

        Returns False if any switch on the route is stopped or any hop link
        is down, else True.
        """
        route = map(lambda x: int(x) - 1, route)
        for i in xrange(1, len(route)):
            prev = route[i - 1]
            cur = route[i]
            if (self.topology[prev][prev] == -1 or
                    self.topology[cur][cur] == -1):
                return False
            if self.topology[prev][cur] == -1:
                return False
        return True

    def contains_route_in_routes(self, route, routes):
        """True if `route` (as an int list) equals the 'route' field of any
        entry in `routes`."""
        builtin.log("route: {0}".format(route), 'DEBUG')
        builtin.log("routes: {0}".format(routes), 'DEBUG')
        route = map(lambda x: int(x), route)
        for i in routes:
            if i.get('route') and map(lambda x: int(x), i['route']) == route:
                return True
        return False

    def parse_tree(self, resp):
        """Define and check the routes and links
        :param resp:json from response
        """
        builtin.log("JSON for parsing: {0}".format(resp), 'DEBUG')
        source_node_list = set()
        destination_node_list = set()
        links_dict = collections.OrderedDict()
        routes = []
        states_dict = dict()
        route_container = resp.get('route-container')
        route_list = route_container.get('route-list')
        route_list_length = len(route_list)
        # TODO
        # NOTE(review): states_dict fields are overwritten on every leaf
        # iteration, so only the last leaf survives in the returned dict.
        for i in range(0, route_list_length):
            needed_leaf = i
            route_leaf = route_list[needed_leaf]
            leaf_source = route_leaf.get('source')
            leaf_destination = route_leaf.get('destination')
            states_dict['source'] = leaf_source
            states_dict['destination'] = leaf_destination
            route = route_leaf.get('route', [])
            for i in range(0, len(route)):
                route_state = dict()
                vertexes = set()
                path = route[i]
                state = path.get('state')
                route_state['state'] = state
                route_state['route'] = vertexes
                routes.append(route_state)
                states_dict['routes'] = routes
                links = path.get('path')
                links_count = len(links)
                for j in range(0, links_count):
                    link = links[j]
                    link_source = link.get('source')
                    link_destination = link.get('destination')
                    source_node = link_source.get('source-node')
                    destination_node = link_destination.get('dest-node')
                    # node ids look like "openflow:4" -> keep the numeric tail
                    source_flow = source_node.split(':')[-1]
                    destination_flow = destination_node.split(':')[-1]
                    vertexes.add(source_flow)
                    vertexes.add(destination_flow)
                    source_node_list.add(source_node)
                    destination_node_list.add(destination_node)
                    links_dict[source_node] = destination_node
        return states_dict

    def parse_tree_2(self, resp):
        """Parse output json from ncn restconfig
        :param resp:json from response
        [{'state': 'up', 'destination': '4', 'route': ['1', '4'],
          'source': '1', 'id': 100},
         ....................................................................
         {'destination': '3', 'source': '1'},
         {'destination': '7', 'source': '1'}]
        """
        builtin.log("JSON for parsing: {0}".format(resp), 'DEBUG')
        routes = []
        route_list = resp.get('route-container').get('route-list')
        for routes_between_switches in route_list:
            routes_rest_conf = routes_between_switches.get("route")
            if routes_rest_conf:
                # NOTE (msenin)
                # format of fields 'source' and 'destination': openflow:4
                for route_rest in routes_rest_conf:
                    route = {}
                    route['source'] = int(route_rest['source'][9:])
                    route['destination'] = \
                        int(route_rest['destination'][9:])
                    route['state'] = route_rest['state']
                    pathes = route_rest.get('path')
                    route['id'] = route_rest.get('id')
                    path = []
                    for link in pathes:
                        link_source = link.get('source')
                        link_destination = link.get('destination')
                        source_node = link_source.get('source-node')
                        destination_node = link_destination.get('dest-node')
                        source_flow = int(source_node[9:])
                        destination_flow = int(destination_node[9:])
                        if source_flow not in path:
                            path.append(source_flow)
                        if destination_flow not in path:
                            path.append(destination_flow)
                    route['route'] = path
                    routes.append(route)
            else:
                # leaf without installed routes: keep only the endpoints
                route = {}
                route['source'] = int(routes_between_switches['source'][9:])
                route['destination'] = \
                    int(routes_between_switches['destination'][9:])
                routes.append(route)
        return routes

    def check_route_state_by_DOM_tree(self, route, tree):
        """ return 1 if route up, -1 down and 0 if unexist """
        # `route` may arrive as a Robot string like "[1, 4]" -> parse it
        if isinstance(route, str) or isinstance(route, unicode):
            route = list(route[1:-1].split(','))
            route = map(lambda x: int(x), route)
        builtin.log("route: {0}".format(route), 'DEBUG')
        tree = self.parse_tree_2(tree)
        builtin.log("tree: {0}".format(tree), 'DEBUG')
        filtered_tree = filter(lambda x: x.get('route') == route, tree)
        if filtered_tree:
            if filtered_tree[0]['state'] == 'up':
                return 1
            else:
                return -1
        else:
            return 0

    def filter_DOM_tree_by_field(self, condition, tree):
        # TODO (msenin) add logger
        # NOTE(review): `condition` is eval()'d with each route dict bound to
        # `field` — fine for trusted test fixtures, but never expose this to
        # untrusted input.
        tree = self.parse_tree_2(tree)
        filtered_tree = filter(lambda field: eval(condition), tree)
        return filtered_tree
def main(cli=0):
    """Bring up a 2-host / 4-switch STP test topology of P4 docker switches.

    cli -- when truthy, drop into the Mininet CLI; otherwise wait 30 s for
           convergence and run an automated ping check.
    Returns net.ping()'s packet-drop percentage (0 == all pings succeeded).
    """
    net = Mininet( controller = None )

    # add hosts (both on the same /24 — L2 connectivity through the switches)
    h1 = net.addHost( 'h1', ip = '172.16.10.1/24' )
    h2 = net.addHost( 'h2', ip = '172.16.10.2/24' )

    # add switch 1
    sw1 = net.addSwitch( 'sw1', target_name = "p4dockerswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw1/stp',
                         pcap_dump = True )

    # add switch 2
    sw2 = net.addSwitch( 'sw2', target_name = "p4dockerswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw2/stp',
                         pcap_dump = True )

    # add switch 3
    sw3 = net.addSwitch( 'sw3', target_name = "p4dockerswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw3/stp',
                         pcap_dump = True )

    # add switch 4
    sw4 = net.addSwitch( 'sw4', target_name = "p4dockerswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw4/stp',
                         pcap_dump = True )

    # add links; Mininet <= 2.2.0 has no 'fast' link option
    if StrictVersion(VERSION) <= StrictVersion('2.2.0') :
        net.addLink( sw1, h1, port1 = 1 )
        net.addLink( sw2, h2, port1 = 1 )
        net.addLink( sw1, sw3, port1 = 2, port2 = 1 )
        net.addLink( sw1, sw4, port1 = 3, port2 = 1 )
        net.addLink( sw2, sw3, port1 = 2, port2 = 2 )
        net.addLink( sw2, sw4, port1 = 3, port2 = 2 )
        net.addLink( sw3, sw4, port1 = 3, port2 = 3 )
    else:
        net.addLink( sw1, h1, port1 = 1, fast = False )
        net.addLink( sw2, h2, port1 = 1, fast = False )
        net.addLink( sw1, sw3, port1 = 2, port2 = 1, fast = False )
        net.addLink( sw1, sw4, port1 = 3, port2 = 1, fast = False )
        net.addLink( sw2, sw3, port1 = 2, port2 = 2, fast = False )
        net.addLink( sw2, sw4, port1 = 3, port2 = 2, fast = False )
        net.addLink( sw3, sw4, port1 = 3, port2 = 3, fast = False )

    net.start()

    result = 0
    if cli:
        CLI( net )
    else:
        sleep(30)  # allow STP to converge before testing
        node_values = net.values()
        print node_values

        hosts = net.hosts
        print hosts

        print "PING BETWEEN THE HOSTS"
        result = net.ping(hosts, 30)

        # print host arp table & routes
        for host in hosts:
            print "ARP ENTRIES ON HOST"
            print host.cmd('arp -n')
            print "HOST ROUTES"
            print host.cmd('route')
            print "HOST INTERFACE LIST"
            intfList = host.intfNames()
            print intfList

        if result != 0:
            print "PING FAILED BETWEEN HOSTS %s" % (hosts)
        else:
            print "PING SUCCESSFUL!!!"

    net.stop()
    return result
def main(cli=0):
    """Bring up a 4-host leaf/spine BGP topology of P4 SONiC docker switches
    (sw1/sw2 spines, sw3/sw4 leaves) and verify host-to-host connectivity.

    cli -- when truthy, drop into the Mininet CLI; otherwise wait 90 s for
           BGP convergence and run an automated ping check.
    Returns net.ping()'s packet-drop percentage (0 == all pings succeeded).
    """
    net = Mininet( controller = None )

    # add hosts (each in its own /24 behind a leaf switch)
    h1 = net.addHost( 'h1', ip = '172.16.101.5/24', mac = '00:04:00:00:00:02' )
    h2 = net.addHost( 'h2', ip = '172.18.101.5/24', mac = '00:04:00:00:00:03' )
    h3 = net.addHost( 'h3', ip = '172.16.102.5/24', mac = '00:05:00:00:00:02' )
    h4 = net.addHost( 'h4', ip = '172.18.102.5/24', mac = '00:05:00:00:00:03' )

    # add switch 1 - spine 1
    sw1 = net.addSwitch( 'sw1', target_name = "p4sonicswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw1/l3_bgp',
                         pcap_dump = True, start_program='/bin/bash')

    # add switch 2 - spine 2
    sw2 = net.addSwitch( 'sw2', target_name = "p4sonicswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw2/l3_bgp',
                         pcap_dump = True, start_program='/bin/bash')

    # add switch 3 - leaf 1
    sw3 = net.addSwitch( 'sw3', target_name = "p4sonicswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw3/l3_bgp',
                         pcap_dump = True, start_program='/bin/bash')

    # add switch 4 - leaf 2
    sw4 = net.addSwitch( 'sw4', target_name = "p4sonicswitch",
                         cls = P4DockerSwitch, config_fs = 'configs/sw4/l3_bgp',
                         pcap_dump = True, start_program='/bin/bash')

    # add links; Mininet <= 2.2.0 has no 'fast' link option
    if StrictVersion(VERSION) <= StrictVersion('2.2.0') :
        net.addLink( sw3, h1, port1 = 1 )
        net.addLink( sw3, h2, port1 = 2 )
        net.addLink( sw1, sw3, port1 = 1, port2 = 3 )
        net.addLink( sw2, sw3, port1 = 1, port2 = 4 )
        net.addLink( sw4, h3, port1 = 1 )
        net.addLink( sw4, h4, port1 = 2 )
        net.addLink( sw1, sw4, port1 = 2, port2 = 3 )
        net.addLink( sw2, sw4, port1 = 2, port2 = 4 )
    else:
        net.addLink( sw3, h1, port1 = 1 , fast=False )
        net.addLink( sw3, h2, port1 = 2 , fast=False )
        net.addLink( sw1, sw3, port1 = 1, port2 = 3 , fast=False )
        net.addLink( sw2, sw3, port1 = 1, port2 = 4 , fast=False )
        net.addLink( sw4, h3, port1 = 1 , fast=False )
        net.addLink( sw4, h4, port1 = 2 , fast=False )
        net.addLink( sw1, sw4, port1 = 2, port2 = 3 , fast=False )
        net.addLink( sw2, sw4, port1 = 2, port2 = 4 , fast=False )

    # provision each switch container: copy its bmv2 run script, start the
    # dataplane with a unique router MAC, then apply its startup config
    sw1.cpFile('run_bm_sw1.sh', '/sonic-swss/bmv2/run_bm.sh')
    sw1.execProgram('/scripts/startup.sh', args='-m 00:00:01:00:00:01')
    sw1.execProgram("/configs/startup_config.sh")

    sw2.cpFile('run_bm_sw2.sh', '/sonic-swss/bmv2/run_bm.sh')
    sw2.execProgram('/scripts/startup.sh', args='-m 00:00:01:00:00:02')
    sw2.execProgram("/configs/startup_config.sh")

    sw3.cpFile('run_bm_sw3.sh', '/sonic-swss/bmv2/run_bm.sh')
    sw3.execProgram('/scripts/startup.sh', args='-m 00:00:01:00:00:03')
    sw3.execProgram("/configs/startup_config.sh")

    sw4.cpFile('run_bm_sw4.sh', '/sonic-swss/bmv2/run_bm.sh')
    sw4.execProgram('/scripts/startup.sh', args='-m 00:00:01:00:00:04')
    sw4.execProgram("/configs/startup_config.sh")

    net.start()

    # hosts configuration - ipv4 (default gw = leaf switch address)
    h1.setDefaultRoute( 'via 172.16.101.1' )
    h2.setDefaultRoute( 'via 172.18.101.1' )
    h3.setDefaultRoute( 'via 172.16.102.1' )
    h4.setDefaultRoute( 'via 172.18.102.1' )

    # start the BGP daemons on every switch
    sw1.cmd( 'service quagga start')
    sw2.cmd( 'service quagga start')
    sw3.cmd( 'service quagga start')
    sw4.cmd( 'service quagga start')

    result = 0
    if cli:
        CLI(net)
    else:
        sleep(90)  # allow BGP sessions to establish and routes to propagate
        node_values = net.values()
        print node_values

        hosts = net.hosts
        print hosts

        # ping hosts
        print "PING BETWEEN THE HOSTS"
        result = net.ping(hosts,30)

        if result != 0:
            print "PING FAILED BETWEEN HOSTS %s" % (hosts)
        else:
            print "PING SUCCESSFUL!!!"

        # print host arp table & routes
        for host in hosts:
            print "ARP ENTRIES ON HOST"
            print host.cmd('arp -n')
            print "HOST ROUTES"
            print host.cmd('route')
            print "HOST INTERFACE LIST"
            intfList = host.intfNames()
            print intfList

    net.stop()
    return result
class FaucetTest(unittest.TestCase): ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss' CONFIG = '' CONTROLLER_IPV4 = '10.0.0.254' CONTROLLER_IPV6 = 'fc00::1:254' OFCTL = 'ovs-ofctl -OOpenFlow13' def setUp(self): self.tmpdir = tempfile.mkdtemp() os.environ['FAUCET_CONFIG'] = os.path.join(self.tmpdir, 'faucet.yaml') os.environ['FAUCET_LOG'] = os.path.join(self.tmpdir, 'faucet.log') os.environ['FAUCET_EXCEPTION_LOG'] = os.path.join(self.tmpdir, 'faucet-exception.log') self.debug_log_path = os.path.join(self.tmpdir, 'ofchannel.log') self.CONFIG = '\n'.join(( self.get_config_header(DPID, HARDWARE), self.CONFIG % PORT_MAP, 'ofchannel_log: "%s"' % self.debug_log_path)) open(os.environ['FAUCET_CONFIG'], 'w').write(self.CONFIG) self.net = None self.topo = None def get_config_header(self, dpid, hardware): return ''' --- dp_id: 0x%s name: "faucet-1" hardware: "%s" ''' % (dpid, hardware) def attach_physical_switch(self): switch = self.net.switches[0] hosts_count = len(self.net.hosts) for i, test_host_port in enumerate(sorted(SWITCH_MAP)): port_i = i + 1 mapped_port_i = port_i + hosts_count phys_port = Intf(SWITCH_MAP[test_host_port], node=switch) switch.cmd('ifconfig %s up' % phys_port) switch.cmd('ovs-vsctl add-port %s %s' % (switch.name, phys_port.name)) for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)): port_x, port_y = port_pair switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % ( self.OFCTL, switch.name, port_x, port_y)) for _ in range(20): if (os.path.exists(self.debug_log_path) and os.path.getsize(self.debug_log_path) > 0): return time.sleep(1) print 'physical switch could not connect to controller' sys.exit(-1) def start_net(self): self.net = Mininet(self.topo, controller=FAUCET) self.net.start() if SWITCH_MAP: self.attach_physical_switch() else: self.net.waitConnected() self.wait_until_matching_flow('actions=CONTROLLER') dumpNodeConnections(self.net.hosts) def tearDown(self): if self.net is not None: self.net.stop() # 
Mininet takes a long time to actually shutdown. # TODO: detect and block when Mininet isn't done. time.sleep(5) shutil.rmtree(self.tmpdir) def add_host_ipv6_address(self, host, ip_v6): host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf())) def one_ipv4_ping(self, host, dst): ping_result = host.cmd('ping -c1 %s' % dst) self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result)) def one_ipv4_controller_ping(self, host): self.one_ipv4_ping(host, self.CONTROLLER_IPV4) def one_ipv6_ping(self, host, dst): # TODO: retry our one ping. We should not have to retry. for _ in range(2): ping_result = host.cmd('ping6 -c1 %s' % dst) if re.search(self.ONE_GOOD_PING, ping_result): return self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result)) def one_ipv6_controller_ping(self, host): self.one_ipv6_ping(host, self.CONTROLLER_IPV6) def wait_until_matching_flow(self, flow, timeout=5): # TODO: actually verify flows were communicated to the physical switch. # Could use size of ofchannel log, though this is not authoritative. 
if SWITCH_MAP: time.sleep(1) return switch = self.net.switches[0] for _ in range(timeout): dump_flows_cmd = '%s dump-flows %s' % (self.OFCTL, switch.name) dump_flows = switch.cmd(dump_flows_cmd) for line in dump_flows.split('\n'): if re.search(flow, line): return time.sleep(1) print flow, dump_flows self.assertTrue(re.search(flow, dump_flows)) def swap_host_macs(self, first_host, second_host): first_host_mac = first_host.MAC() second_host_mac = second_host.MAC() first_host.setMAC(second_host_mac) second_host.setMAC(first_host_mac) def verify_ipv4_routing(self, first_host, first_host_routed_ip, second_host, second_host_routed_ip): first_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (first_host.intf(), first_host_routed_ip.ip))) second_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (second_host.intf(), second_host_routed_ip.ip))) first_host.cmd(('route add -net %s gw %s' % ( second_host_routed_ip.masked(), self.CONTROLLER_IPV4))) second_host.cmd(('route add -net %s gw %s' % ( first_host_routed_ip.masked(), self.CONTROLLER_IPV4))) self.net.ping(hosts=(first_host, second_host)) self.wait_until_matching_flow( 'nw_dst=%s.+set_field:%s->eth_dst' % ( first_host_routed_ip.masked(), first_host.MAC())) self.wait_until_matching_flow( 'nw_dst=%s.+set_field:%s->eth_dst' % ( second_host_routed_ip.masked(), second_host.MAC())) self.one_ipv4_ping(first_host, second_host_routed_ip.ip) self.one_ipv4_ping(second_host, first_host_routed_ip.ip) def verify_ipv6_routing(self, first_host, first_host_ip, first_host_routed_ip, second_host, second_host_ip, second_host_routed_ip): self.add_host_ipv6_address(first_host, first_host_ip) self.add_host_ipv6_address(second_host, second_host_ip) self.one_ipv6_ping(first_host, second_host_ip.ip) self.one_ipv6_ping(second_host, first_host_ip.ip) self.add_host_ipv6_address(first_host, first_host_routed_ip) self.add_host_ipv6_address(second_host, second_host_routed_ip) first_host.cmd('ip -6 route add %s via %s' % ( 
second_host_routed_ip.masked(), self.CONTROLLER_IPV6)) second_host.cmd('ip -6 route add %s via %s' % ( first_host_routed_ip.masked(), self.CONTROLLER_IPV6)) self.wait_until_matching_flow( 'ipv6_dst=%s.+set_field:%s->eth_dst' % ( first_host_routed_ip.masked(), first_host.MAC())) self.wait_until_matching_flow( 'ipv6_dst=%s.+set_field:%s->eth_dst' % ( second_host_routed_ip.masked(), second_host.MAC())) self.one_ipv6_controller_ping(first_host) self.one_ipv6_controller_ping(second_host) self.one_ipv6_ping(first_host, second_host_routed_ip.ip) self.one_ipv6_ping(second_host, first_host_routed_ip.ip)
# NOTE(review): the three statements below look like the tail of a
# `configure_prism(...)` definition whose header is not visible in this
# chunk (a `prismconf` file handle is closed, then the prism controller is
# launched via mul.sh).  Kept verbatim — TODO: confirm against the full file.
prismconf.close()
call(['bash','./mul.sh','start','prism'])
sleep(3)


def configure_prism_intf( net ):
    """Bring up the prism-side interface for every host port.

    For each host, each of its topology ports maps to a peer interface
    named pr-<peer>-eth<port>; that interface is configured with the .1
    gateway address of the host's own /24 and brought up.
    """
    for h in net.hosts:
        for port, (peer_name, peer_port) in net.topo.ports[h.name].iteritems():
            idx = h.IP().split(".")
            # Same /24 as the host, host octet forced to .1 (gateway style).
            dfl_route = "%s.%s.%s.1/24" % (idx[0], idx[1], idx[2])
            call(['ifconfig',"pr-%s-eth%d"%(peer_name,peer_port),dfl_route,'up'])


def ping( node ):
    """Register pollers on the node's stdout and our stdin (fd 0).

    NOTE(review): this function appears truncated — it sets up pollers
    but never polls or sends a ping; confirm against the full file.
    """
    nodePoller = poll()
    # Fix: original called `nodPoller.register(...)` (misspelled), which
    # would raise NameError as soon as this function ran.
    nodePoller.register( node.stdout )
    bothPoller = poll()
    bothPoller.register(0)


# Script driver: build a 3-ary leaf-spine topology under the prism
# remote controller, configure the prism interfaces, and wait until
# full connectivity (0% ping loss) before handing over to the CLI.
topo = LeafspineTopo(3)
net = Mininet(topo=topo, controller=RemoteController, cleanup=True)
configure_prism(net)
net.start()
configure_prism_intf(net)
while True:
    ploss = net.ping()
    if ploss == 0:
        break
CLI(net)
net.stop()
cleanup()