def mobilityTest():
    "A simple test of mobility"
    print( '* Simple mobility test' )
    net = Mininet( topo=LinearTopo( 3 ), switch=MobilitySwitch )
    print( '* Starting network:' )
    net.start()
    printConnections( net.switches )
    print( '* Testing network' )
    net.pingAll()
    print( '* Identifying switch interface for h1' )
    host, current = net.get( 'h1', 's1' )
    # Walk h1 around the switches, ending back where it started
    for index in ( 2, 3, 1 ):
        target = net[ 's%d' % index ]
        newPort = randint( 10, 20 )
        print( '* Moving', host, 'from', current, 'to', target, 'port', newPort )
        hintf, sintf = moveHost( host, current, target, newPort=newPort )
        print( '*', hintf, 'is now connected to', sintf )
        print( '* Clearing out old flows' )
        for switch in net.switches:
            switch.dpctl( 'del-flows' )
        print( '* New network:' )
        printConnections( net.switches )
        print( '* Testing connectivity:' )
        net.pingAll()
        current = target
    net.stop()
def perfTest(): "Create network and run simple performance test" topo =LinearTopo(k=Y) net =Mininet(topo=topo, host=CPULimitedHost, link=TCLink,controller=OVSController) count=1 for i in range(1,X+1,2): stri="h" stri2="127.0.0." stri2=stri2+str(count) count=count+1 stri=stri+str(i) hi=net.get(stri) hi.setIP(stri2,24) count=1 for i in range(2,X+1,2): stri="h" stri2="192.168.0." stri=stri+str(i) stri2=stri2+str(count) count=count+1 hi=net.get(stri) hi.setIP(stri2,29) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() net.stop()
def main(): "Create and run experiment" start = time() topo = MNTopo() host = custom(CPULimitedHost, cpu=.15) # 15% of system bandwidth link = custom(TCLink, max_queue_size=200) net = Mininet(topo=topo, host=host, link=link) net.start() print "*** Dumping network connections:" dumpNetConnections(net) print "*** Testing connectivity" net.pingAll() if args.cli: # Run CLI instead of experiment CLI(net) else: print "*** Running experiment" run_topology_experiment(net) net.stop() end = time() os.system("killall -9 bwm-ng") print "Experiment took %.3f seconds" % (end - start)
def RunTest():
    """TOPO"""
    topo = FatTreeTopo()
    topo.topoCreate(4, 4, 2, 2, 2)
    # The topology object publishes the controller coordinates
    ctlName = topo.crtlname
    ctlIp = topo.crtlip
    ctlPort = topo.crtlport
    net = Mininet(topo=topo, build=False, link=TCLink, controller=None)
    time.sleep(1)
    net.addController(ctlName, controller=RemoteController,
                      ip=ctlIp, port=ctlPort)
    net.start()
    dumpNodeConnections(net.hosts)
    net.pingAll()
    h1 = net.get('h000')
    h16 = net.get('h311')
    h2 = net.get('h001')
    # Background UDP iperf servers; h2 drives traffic to both of them
    h1.popen('iperf -s -u -i 1')
    h16.popen('iperf -s -u -i 1')
    h2.cmdPrint('iperf -c ' + h1.IP() + ' -u -t 10 -i 1 -b 100m')
    h2.cmdPrint('iperf -c ' + h16.IP() + ' -u -t 10 -i 1 -b 100m')
    CLI(net)
    net.stop()
def required(x,y): topo = MyTopo(x, y) net = Mininet(topo,host=CPULimitedHost, link=TCLink) net.start() for i in xrange(y): for j in xrange(y): if (i+1)%2==0 and (j+1)%2==1: net.nameToNode["h"+str(i+1)].cmd("iptables -A OUTPUT -o h"+str(i+1)+"-eth0 -d 10.0.0."+ str(j+1)+" -j DROP") net.nameToNode["h"+str(j+1)].cmd("iptables -A OUTPUT -o h"+str(j+1)+"-eth0 -d 10.0.0."+ str(i+1)+" -j DROP") elif (i+1)%2==1 and (j+1)%2==0: net.nameToNode["h"+str(i+1)].cmd("iptables -A OUTPUT -o h"+str(i+1)+"-eth0 -d 10.0.0."+ str(j+1)+" -j DROP") net.nameToNode["h"+str(j+1)].cmd("iptables -A OUTPUT -o h"+str(j+1)+"-eth0 -d 10.0.0."+ str(i+1)+" -j DROP") net.pingAll() try: print "Testing bandwidth between h1 and h3" h1, h3 = net.get('h1', 'h3') net.iperf((h1, h3)) except: c=1 try: print "Testing bandwidth between h1 and h3" h4, h2 = net.get('h2', 'h4') net.iperf((h2, h4)) except: c=1 dumpNodeConnections(net.switches) CLI(net) net.stop()
def bwtest( cpuLimits, period_us=100000, seconds=5 ): """Example/test of link and CPU bandwidth limits cpu: cpu limit as fraction of overall CPU time""" topo = TreeTopo( depth=1, fanout=2 ) results = {} for sched in 'rt', 'cfs': print '*** Testing with', sched, 'bandwidth limiting' for cpu in cpuLimits: host = custom( CPULimitedHost, sched=sched, period_us=period_us, cpu=cpu ) net = Mininet( topo=topo, host=host ) net.start() net.pingAll() hosts = [ net.getNodeByName( h ) for h in topo.hosts() ] client, server = hosts[ 0 ], hosts[ -1 ] server.cmd( 'iperf -s -p 5001 &' ) waitListening( client, server, 5001 ) result = client.cmd( 'iperf -yc -t %s -c %s' % ( seconds, server.IP() ) ).split( ',' ) bps = float( result[ -1 ] ) server.cmdPrint( 'kill %iperf' ) net.stop() updated = results.get( sched, [] ) updated += [ ( cpu, bps ) ] results[ sched ] = updated return results
def rosTest(): "Create network and run simple performance test" topo = SingleSwitchTopo(n=3) net = Mininet(topo=topo, link=TCLink) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() h1, h2, h3 = net.get('h1', 'h2', 'h3') print "Starting roscore" output = h3.cmd('source /opt/ros/indigo/setup.bash && ' 'export ROS_IP='+h3.IP()+ ' && export ROS_MASTER_URI=http://'+h3.IP()+ ':11311 && roscore &') print output print "Starting receiver" output = h2.cmd('export LD_LIBRARY_PATH=/home/jeb/:$LD_LIBRARY_PATH && export ROS_IP='+h2.IP()+ ' && export ROS_MASTER_URI=http://'+h3.IP()+ ':11311 && /home/jeb/node_main -config /home/jeb/receiver.xml --tg_time 50 &') print output print "Starting sender" output = h1.cmd('export LD_LIBRARY_PATH=/home/jeb/:$LD_LIBRARY_PATH && export ROS_IP='+h1.IP()+ ' && export ROS_MASTER_URI=http://'+h3.IP()+ ':11311 && /home/jeb/node_main -config /home/jeb/sender.xml --tg_time 50 --max_data_length_bits 6000 &') print output sleep(70) net.stop()
def multiSwitchNet(NS,NH): "Add 1 remote controller, NS switches & NH hosts per switch" port = 6633 net = Mininet(controller=RemoteController, switch=Switch, host=CPULimitedHost, link=TCLink, build=False) k=NS+1; # Add a single controller for all switches remote = addController(net,1,port) lastSwitch = None for i in range(1,NS+1) : # name = 'c%d' % i # remote = addController(N=i,gateID=port) s = addSwitch(net,N=i) host = [addHost(net,n) for n in range(k,k+NH)] for h in host : net.addLink(h,s,bw=10,delay='5ms',loss=0,max_queue_size=1000,use_htb=True) if lastSwitch : net.addLink(s,lastSwitch,bw=10,delay='5ms',loss=0,max_queue_size=1000,use_htb=True) lastSwitch = s port = port + 1 k=k+NH print "Starting network" net.start() print "Testing network" net.pingAll() print "Running CLI" CLI( net ) print "Stopping network" net.stop()
def test_L3EthStar():
    """Bring up the L3 star topology and verify that every node received
    the IP and MAC address assigned in the config dicts.

    Fixed: ``ok_(a, b)`` only asserts that ``a`` is truthy and treats
    ``b`` as the failure message, so the original never compared the
    addresses at all.  Plain equality assertions are used instead.
    """
    topo = L3EthStar(add_attacker=True)
    net = Mininet(
        topo=topo, link=TCLink,
        listenPort=OF_MISC['switch_debug_port'])
    net.start()
    net.pingAll()
    n = L3_NODES
    for h in range(n - 2):
        key = 'plc%s' % (h + 1)
        plc = net.get(key)  # get host obj reference by name
        assert plc.IP() == L1_PLCS_IP[key]
        assert plc.MAC() == PLCS_MAC[key]
    histn = net.get('histn')
    assert histn.IP() == L3_PLANT_NETWORK['histn']
    assert histn.MAC() == OTHER_MACS['histn']
    workstn = net.get('workstn')
    assert workstn.IP() == L3_PLANT_NETWORK['workstn']
    assert workstn.MAC() == OTHER_MACS['workstn']
    # alternative way to obtain IP and MAC
    # params = workstn.params
    net.stop()
def main():
    "Smoke-test a 4-host single-switch network."
    do_something()
    net = Mininet(topo=SingleSwitchTopo(4))
    net.start()
    net.pingAll()
    net.stop()
def testIt():
    "Start the single-switch topology, dump connections, ping, shut down."
    net = Mininet(SingleSwitch())
    net.start()
    dumpNodeConnections(net.hosts)
    net.pingAll()
    net.stop()
class FaucetTaggedAndUntaggedVlanTest(FaucetUntaggedTest):
    """One tagged host plus three untagged hosts sharing VLAN 100."""

    # YAML fragment appended to CONFIG_HEADER; the %(port_N)d
    # placeholders are substituted from PORT_MAP in setUp().
    # NOTE(review): indentation of this YAML was reconstructed from a
    # whitespace-mangled source -- confirm against the original file.
    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "mixed"
        unicast_flood: False
"""

    def setUp(self):
        # Substitute real port numbers before the base class writes the
        # config out and starts the controller.
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedAndUntaggedVlanTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=1, n_untagged=3)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        # Controller is considered ready once its CONTROLLER flow appears
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_untagged(self):
        # Tagged and untagged ports share VLAN 100, so all pings succeed
        self.net.pingAll()
def intfOptions():
    "run various traffic control commands on a single interface"
    net = Mininet( autoStaticArp=True )
    net.addController( 'c0' )
    hostA = net.addHost( 'h1' )
    hostB = net.addHost( 'h2' )
    switch = net.addSwitch( 's1' )
    # Only the h1<->s1 link is traffic-controlled
    tcLink = net.addLink( hostA, switch, cls=TCLink )
    net.addLink( hostB, switch )
    net.start()
    # flush out latency from reactive forwarding delay
    net.pingAll()
    info( '\n*** Configuring one intf with bandwidth of 5 Mb\n' )
    tcLink.intf1.config( bw=5 )
    info( '\n*** Running iperf to test\n' )
    net.iperf()
    info( '\n*** Configuring one intf with loss of 50%\n' )
    tcLink.intf1.config( loss=50 )
    info( '\n' )
    net.iperf( ( hostA, hostB ), l4Type='UDP' )
    info( '\n*** Configuring one intf with delay of 15ms\n' )
    tcLink.intf1.config( delay='15ms' )
    info( '\n*** Run a ping to confirm delay\n' )
    net.pingPairFull()
    info( '\n*** Done testing\n' )
    net.stop()
def perfTest(): topo = TestTopo() net = Mininet(topo=topo, controller=lambda name: RemoteController(name, ip='127.0.0.1'), link=TCLink) net.start() print "Dumping connections" dumpNodeConnections(net.hosts) print "Launching LLDP daemons" for host in net.hosts: host.cmd('../../tiny-lldpd/tlldpd -d -i 1') sleep(3) net.pingAll() net.get('h3').cmd('iperf -sD') net.get('h4').cmd('iperf -sD') net.get('h1').cmd('iperf -c %s > /tmp/h1.txt &' % net.get('h3').IP()) net.get('h2').cmd('sleep 1; iperf -c %s > /tmp/h2.txt &' % net.get('h4').IP()) net.get('h1').cmd('wait $!') net.get('h2').cmd('wait $!') net.get('h1').cmdPrint("awk 'NR == 7 {print $7 $8}' /tmp/h1.txt") net.get('h2').cmdPrint("awk 'NR == 7 {print $7 $8}' /tmp/h2.txt") net.hosts[0].cmd('killall -9 tlldpd') net.hosts[0].cmd('killall -9 iperf') net.stop() cleanup()
def multiSwitchTest(): topo = MultiSwitchTopo(depth=2, fanout=4) #net = Mininet(topo, controller=OVSController) net = Mininet(topo, controller=lambda name: RemoteController(name, ip='192.168.56.1')) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() receivers = ["00:11:22:33:44:00", "00:11:22:33:44:04", "00:11:22:33:44:08", "00:11:22:33:44:0c"] for host in net.hosts: if host.defaultIntf().MAC() in receivers: startLogReceiver(host) else: startLogSender(host) for host in net.hosts: if not (host.defaultIntf().MAC() in receivers): runGenerator(host) for host in net.hosts: if host.defaultIntf().MAC() in receivers: stopLogReceiver(host) else: stopLogSender(host) net.stop()
def main(): start = time() try: topo = NetworkTopo(switch_bw=args.bw_net, host_bw=args.bw_host, switch_delay='%sms' %(args.delay, ), queue_size=23593) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() dumpNodeConnections(net.hosts) net.pingAll() if args.http: test_http(net) else: run_tcp_first(net, args.tcp_n) except: print "-"*80 print "Caught exception. Cleaning up..." print "-"*80 import traceback traceback.print_exc() raise finally: stop_all_iperf() net.stop() Popen("killall -9 top bwm-ng tcpdump cat mnexec; mn -c", shell=True, stderr=PIPE) Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait() stop_tcpprobe() end = time() cprint("Experiment took %s seconds\n" % (end - start), "yellow")
def main():
    "Create and run experiment"
    began = time()
    # 15% of system bandwidth per host; 1ms / 200-packet-queue links
    net = Mininet(topo=ParkingLotTopo(n=args.n),
                  host=custom(CPULimitedHost, cpu=.15),
                  link=custom(TCLink, bw=args.bw, delay='1ms',
                              max_queue_size=200))
    net.start()
    cprint("*** Dumping network connections:", "green")
    dumpNetConnections(net)
    cprint("*** Testing connectivity", "blue")
    net.pingAll()
    if args.cli:
        # Run CLI instead of experiment
        CLI(net)
    else:
        cprint("*** Running experiment", "magenta")
        run_parkinglot_expt(net, n=args.n)
    net.stop()
    ended = time()
    os.system("killall -9 bwm-ng")
    cprint("Experiment took %.3f seconds" % (ended - began), "yellow")
def bbnet(): "Create network and run Buffer Bloat experiment" print "starting mininet ...." # Seconds to run iperf; keep this very high seconds = 3600 start = time() # Reset to known state topo = StarTopo(n=args.n, bw_host=args.bw_host, delay='%sms' % args.delay, bw_net=args.bw_net, maxq=args.maxq, diff=args.diff) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink, autoPinCpus=True, controller=OVSController) net.start() dumpNodeConnections(net.hosts) net.pingAll() print args.diff if args.diff: print "Differentiate Traffic Between iperf and wget" os.system("bash tc_cmd_diff.sh") else: print "exec tc_cmd.sh" os.system("bash tc_cmd.sh %s" % args.maxq) sleep(2) ping_latency(net) print "Initially, the delay between two hosts is around %dms" % (int(args.delay)*2) h2 = net.getNodeByName('h2') h1 = net.getNodeByName('h1') h1.cmd('cd ./http/; nohup python2.7 ./webserver.py &') h1.cmd('cd ../') h2.cmd('iperf -s -w 16m -p 5001 -i 1 > iperf-recv.txt &') CLI( net ) h1.cmd("sudo pkill -9 -f webserver.py") h2.cmd("rm -f index.html*") Popen("killall -9 cat", shell=True).wait()
def run_exercise():
    "Start the TLS exercise: five nginx servers, wireshark and a terminal."
    # Create and start a new network with our custom topology
    net = Mininet(topo=TLSTopo())
    net.start()
    net.pingAll()
    processes = []
    # Start one Nginx HTTP-server per backend host
    for i in range(1, 6):
        processes.append(net["Server-%d" % i].popen(
            'nginx -p /home/vagrant/assignments/ssl_tls/nginx -c nginx_%d.conf' % i))
    # Open wireshark
    processes.append(net["A"].popen('wireshark'))
    # Open terminals
    processes.append(makeTerms([net["A"]], title="Student terminal")[0])
    raw_input("Press Enter to exit....")
    for process in processes:
        process.kill()
    Cleanup.cleanup()
def perTest(): "Specify performance parameters for the links" # Between core and aggregation switches linkopts1 = dict(bw=10, delay='5ms', loss=1, max_queue_size=1000, use_htb=True) # Between aggregation and edge switches linkopts2 = dict(bw=10, delay='5ms', loss=1, max_queue_size=1000, use_htb=True) # Between edge switches and hosts linkopts3 = dict(bw=10, delay='5ms', loss=1, max_queue_size=1000, use_htb=True) "Create and test a simple network" topo = CustomTopo(linkopts1=linkopts1, linkopts2=linkopts2, linkopts3=linkopts3, fanout=2) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() print "Testing bandwidth between h1 with h2, h3 and h5" h1, h2 = net.get('h1', 'h2') net.iperf( ( h1, h2 ) ) h1, h3 = net.get('h1', 'h3') net.iperf( ( h1, h3 ) ) h1, h5 = net.get('h1', 'h5') net.iperf( ( h1, h5 ) ) h1, h7 = net.get('h1', 'h7') net.iperf( ( h1, h7 ) ) net.stop()
def Topology(): net = Mininet(controller= Cotroller, switch = OVSSwitch) print "******** Creating controller" c1= net.addController ('c1', ip='127.0.0.1', port = 6633) print "******** Creating switches" s1 = net.addSwitch( 's1' ) s2 = net.addSwitch( 's2' ) print "******** Creating host" h1 = net.addHost( 'h1' ) h2 = net.addHost( 'h2' ) print "******** Creating links" net.addLink (h1,s1) net.addLink (h2,s2) net.addLink (s1,s2) print "******* Starting network" net.build() c1.start s1.start([c1]) s2.start([c2]) net.start() print "******* Testing network" net.pingAll() print "****** Running CLI" CLI(net)
def testRemoteNet( remote='ubuntu2', link=RemoteGRELink ):
    "Test remote Node classes"
    print( '*** Remote Node Test' )
    network = Mininet( host=RemoteHost, switch=RemoteOVSSwitch, link=link )
    c0 = network.addController( 'c0' )
    # Make sure controller knows its non-loopback address
    Intf( 'eth0', node=c0 ).updateIP()
    print( "*** Creating local h1" )
    h1 = network.addHost( 'h1' )
    print( "*** Creating remote h2" )
    h2 = network.addHost( 'h2', server=remote )
    print( "*** Creating local s1" )
    s1 = network.addSwitch( 's1' )
    print( "*** Creating remote s2" )
    s2 = network.addSwitch( 's2', server=remote )
    print( "*** Adding links" )
    network.addLink( h1, s1 )
    network.addLink( s1, s2 )
    network.addLink( h2, s2 )
    network.start()
    print( 'Mininet is running on', quietRun( 'hostname' ).strip() )
    # Show which physical machine each node actually landed on
    for node in ( c0, h1, h2, s1, s2 ):
        print( 'Node', node, 'is running on', node.cmd( 'hostname' ).strip() )
    network.pingAll()
    CLI( network )
    network.stop()
def run_debug():
    "Run the burst-test topology and log switch egress throughput samples."
    if not os.path.exists(TEST_DIR):
        os.mkdir(TEST_DIR)
    logfile = '%s/tp_log' % TEST_DIR
    init_log(logfile, 'Throughput (Mbps)\n')
    net = Mininet(topo=BurstTestTopo(), host=CPULimitedHost, link=TCLink)
    net.start()
    dumpNetConnections(net)
    net.pingAll()
    # Sanity checks (disabled):
    #   verify_latency(net)
    #   verify_bandwidth(net)
    #   verify_throughput(net)
    start_senders(net, n_senders=1)
    start_receiver(net, n_senders=1, sim_duration=3,
                   max_window_list=[SIM2_MAX_WINDOW_LOW])
    # Sample the switch egress rate and append each sample to the log
    for sample in get_rates('s1-eth0', nsamples=200, period=0.01, wait=1.0):
        write_to_log(logfile, str(sample) + '\n')
    net.stop()
def test():
    "Parse CLI options and run the NetASM single-switch test."
    parser = OptionParser()
    parser.add_option('--cli', action="store_true", dest="cli")
    parser.add_option('--ports', action="store", dest="ports")
    parser.set_defaults(cli=False, ports=2)
    options, args = parser.parse_args()
    topo = SingleSwitchTopo(int(options.ports))
    # The NetASM datapath talks to a local control channel
    NetASMSwitch.CTL_ADDRESS = "127.0.0.1"
    NetASMSwitch.CTL_PORT = 7791
    net = Mininet(topo, switch=NetASMSwitch, autoSetMacs=True,
                  controller=lambda name: RemoteController(name))
    NetASMSwitch.start_datapath(net.switches, address="127.0.0.1", port=6633)
    net.start()
    if options.cli:
        CLI(net)
    else:
        net.pingAll()
    net.stop()
    NetASMSwitch.stop_datapath()
def bwtest(cpuLimits, period_us=100000, seconds=5): """Example/test of link and CPU bandwidth limits cpu: cpu limit as fraction of overall CPU time""" topo = TreeTopo(depth=1, fanout=2) results = {} for sched in "rt", "cfs": print "*** Testing with", sched, "bandwidth limiting" for cpu in cpuLimits: host = custom(CPULimitedHost, sched=sched, period_us=period_us, cpu=cpu) try: net = Mininet(topo=topo, host=host) except: info("*** Skipping host %s\n" % sched) break net.start() net.pingAll() hosts = [net.getNodeByName(h) for h in topo.hosts()] client, server = hosts[0], hosts[-1] server.cmd("iperf -s -p 5001 &") waitListening(client, server, 5001) result = client.cmd("iperf -yc -t %s -c %s" % (seconds, server.IP())).split(",") bps = float(result[-1]) server.cmdPrint("kill %iperf") net.stop() updated = results.get(sched, []) updated += [(cpu, bps)] results[sched] = updated return results
def bufferbloat(**kwargs):
    """Run the buffer-bloat lab experiment.

    Expected kwargs: ``queue_size``, ``ping_RTT`` and
    ``congestion_window`` -- TODO confirm against the caller.
    """
    # Linux uses CUBIC-TCP by default that doesn't have the usual sawtooth
    # behaviour. For those who are curious, replace reno with cubic
    # see what happens...
    # sysctl -a | grep cong should list some interesting parameters.
    os.system("sysctl -w net.ipv4.tcp_congestion_control=reno")
    # create the topology and network
    topo = BBTopo(int(kwargs['queue_size']), int(kwargs['ping_RTT']))
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
                  controller=OVSController)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()
    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    # TODO: Start monitoring the queue sizes. Since the switch I
    # created is "s0", I monitor one of the interfaces. Which
    # interface? The interface numbering starts with 1 and increases.
    # Depending on the order you add links to your network, this
    # number may be 1 or 2. Ensure you use the correct number.
    # qmon = start_qmon(...)
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % ".")
    # TODO: Start iperf, pings, and the webserver.
    # start_iperf(net), ...
    start_iperf(net, kwargs['congestion_window'])
    start_webserver(net)
    start_ping(net)
    # TODO: measure the time it takes to complete webpage transfer
    # from h1 to h2 (say) 4-5 times. Hint: check what the following
    # command does: curl -o /dev/null -s -w %{time_total} google.com
    # Now use the curl command to fetch webpage from the webserver you
    # spawned on host h1 (not from google!)
    print "starting timing tester"
    timing_results = timing_tester(net)
    # TODO: compute average (and standard deviation) of the fetch
    # times. You don't need to plot them. Just print them
    # here and explain your observations in the Questions part
    # in Part 2, where you analyze your measurements.
    print timing_results[0]
    # NOTE(review): numpy.float was removed in NumPy >= 1.20; this code
    # assumes an older NumPy -- confirm the environment.
    print "Ave fetching time: %.4f" % numpy.average(numpy.array(timing_results).astype(numpy.float))
    print "std dev. of fetching times: %.4f" % numpy.std(numpy.array(timing_results).astype(numpy.float))
    # Stop probing
    stop_tcpprobe()
    qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def test_topo():
    "Smoke-test the SWaT topology: build, ping all pairs, tear down."
    network = Mininet(topo=SwatTopo())
    network.start()
    network.pingAll()
    network.stop()
def simpleTest(): #"create and test simple network" topo = SingleSwitchTopo(n=4) net = Mininet(topo) net.start() print "dumping host connection" net.pingAll() net.stop()
def perfTest(): topo = MultiSwitchTopo(n,m) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() print "Testing network connectivity" net.pingAll() net.stop()
def testRemoteTopo( link=RemoteGRELink ):
    "Test remote Node classes using Mininet()/Topo() API"
    network = Mininet( topo=LinearTopo( 2 ), host=HostPlacer,
                       switch=SwitchPlacer, link=link,
                       controller=ClusterController )
    network.start()
    network.pingAll()
    network.stop()
def mainTopo():
    """Build a client/router/server topology and run the queue-size
    congestion-control experiment: a long-lived iperf flow, then a
    short-lived HTTP fetch, saving pcaps and result files.

    NOTE(review): relies on module-level ``maxQ`` and on hard-coded
    local paths (/home/reghn/..., dataResult/examine-X/) -- confirm
    they exist before running.
    """
    #Define Initial Topology on Mininet
    os.system('mn -c')  # clean up any previous mininet state
    print( '===========================================================================' )
    print( '===========================================================================' )
    net = Mininet(link=TCLink, host=CPULimitedHost)
    # NOTE(review): net.start() runs before hosts are added and
    # net.build() runs afterwards -- looks inverted; confirm.
    net.start()
    Cl1 = net.addHost('Cl1', ip='192.168.1.2/29')   # client
    Se2 = net.addHost('Se2', ip='192.168.2.2/29')   # server
    Ro1 = net.addHost('Router1')                    # host acting as IP router
    # Asymmetric bandwidth: 1 Mbit on the client side, 100 Mbit (with a
    # bounded queue) on the server side.
    net.addLink(Cl1, Ro1, bw=1)
    net.addLink(Se2, Ro1, bw=100, max_queue_size=maxQ)  #, max_queue_size = X
    net.build()
    # Configure the router: clear addresses, set MACs, add one subnet
    # per interface, enable forwarding.
    Ro1.cmd('ifconfig Router1-eth0 0')
    Ro1.cmd('ifconfig Router1-eth1 0')
    Ro1.cmd('ifconfig Router1-eth0')
    Ro1.cmd('ifconfig Router1-eth1')
    Ro1.cmd('ifconfig Router1-eth0 hw ether mac=00:00:00:00:02:01')
    Ro1.cmd('ifconfig Router1-eth1 hw ether mac=00:00:00:00:02:02')
    Ro1.cmd("ip addr add 192.168.1.1/29 brd + dev Router1-eth0")
    Ro1.cmd("ip addr add 192.168.2.1/29 brd + dev Router1-eth1")
    Ro1.cmd('sysctl -w net.ipv4.ip_forward=1')
    # Default routes through the router
    Cl1.cmd('ip route add default via 192.168.1.1')
    Se2.cmd('ip route add default via 192.168.2.1')
    #Starting Scenatrio
    print( '=================STARTING SCENARIO 1 (Various Queue)=======================' )
    print( '===========================================================================' )
    Cl1.cmdPrint('sysctl net.ipv4.tcp_congestion_control')
    Se2.cmdPrint('sysctl net.ipv4.tcp_congestion_control')
    print("Queue Size", maxQ)
    # Congestion-control algorithm name, used in result filenames
    ccName = subprocess.check_output(
        'cat /proc/sys/net/ipv4/tcp_congestion_control', shell=True)
    ccName = ccName.replace("\n", "")
    print( '===========================================================================' )
    net.pingAll()
    print( '===========================================================================' )
    Se2.cmd('iperf -s &')
    Se2.cmd('iperf -s > dataResult/examine-X/' + str(ccName) + '_' + str(maxQ) + '_LL_iperf-server.txt &')
    print(' Server Iperf Started')
    Se2.cmd('python -m SimpleHTTPServer &')
    print( '=========================================================================' )
    #### STARTING EXAMINE ####
    print(' TCPDUMP Started Longlived for 65 s Please Wait')
    print(' Iperf Started')
    # Long-lived flow: capture, iperf and ping all run concurrently
    Cl1.cmd( 'tcpdump -G 15 -W 1 -w /home/reghn/Documents/pcapngs/_LL_.pcapng -i Cl1-eth0 &' ) #62s
    Cl1.cmd( 'iperf -c 192.168.2.2 -t 15 -i 1 > dataResult/examine-X/_LL_iperfRests.txt &' ) #60s
    Cl1.cmd('ping 192.168.2.2 -c 15 > dataResult/examine-X/_LL_rttRests.txt ' ) #61s
    #pidCode = subprocess.check_output('pidof tcpdump', shell=True)
    #pidCode = pidCode.replace("\n","")
    #Cl1.cmd('kill '+str(pidCode)+'')
    #### rename file ####
    os.system( 'mv /home/reghn/Documents/pcapngs/_LL_.pcapng /home/reghn/Documents/pcapngs/' + str(ccName) + '_' + str(maxQ) + '_LL_.pcapng')
    os.system( 'mv dataResult/examine-X/_LL_iperfRests.txt dataResult/examine-X/' + str(ccName) + '_' + str(maxQ) + '_LL_iperfRests.txt')
    os.system( 'mv dataResult/examine-X/_LL_rttRests.txt dataResult/examine-X/' + str(ccName) + '_' + str(maxQ) + '_LL_rttRests.txt')
    print( '=========================================================================' )
    Se2.cmd('python -m SimpleHTTPServer &')
    print(' Python HTTP Server Start')
    print( '=========================================================================' )
    os.system( 'echo TCPDUMP Shortlived Started for 10 s Please Wait')
    # Short-lived flow: capture a single HTTP fetch from the server
    Cl1.cmd( 'tcpdump -G 15 -W 1 -w /home/reghn/Documents/pcapngs/_SL_.pcapng -i Cl1-eth0 &' )
    Cl1.cmdPrint('wget 192.168.2.2:8000')
    print(" Processing all file's ")
    os.system('scrot --delay 2 restSL.png')  # screenshot of the result
    os.system( 'mv /home/reghn/Documents/pcapngs/_SL_.pcapng /home/reghn/Documents/pcapngs/' + str(ccName) + '_' + str(maxQ) + '_SL_.pcapng')
    os.system('mv restSL.png restSL' + str(ccName) + '' + str(maxQ) + '')
    print( '=========================================================================' )
    time.sleep(10)
    # CLI(net)
    net.stop()
def myNet():
    """Start the generated topology, write neighbor/MAC maps to text
    files, start a tcpdump per interface, then run the traffic/failure
    events and tear down.

    NOTE(review): reads the node to break from argv[1] and publishes
    several globals (brokennode, PP, neighbors, mac) presumably
    consumed elsewhere -- confirm.
    """
    global brokennode
    brokennode = sys.argv[1]
    global PP
    PP = PPBP()
    PP.init_exp()
    PP.init_pareto()
    #print(PP.exp_set)
    #print(PP.pareto_set)
    #A='python on_off_client.py 192.168.123.1 192.168.123.32 3121412 '+PP.exp_set+' '+PP.pareto_set+' 123 &'
    #print(len(A))
    #sys.exit(0)
    #print(A)
    ###PPBP is ok
    net = Mininet(
        topo=GeneratedTopo(),
        controller=lambda a: RemoteController(a, ip='127.0.0.1', port=6633),
        host=CPULimitedHost, link=TCLink)
    # net=Mininet(topo=GeneratedTopo(),host=CPULimitedHost,link=TCLink)
    # net.addController('c0', RemoteController, ip="127.0.0.1",port=6633)
    print("Hello")
    # net.start()
    print("DUmping host coonnections")
    # dumpNodeConnections(net.hosts)
    net.start()
    print("host is OKJ")
    # sleep(10)
    # Wait until the switches have connected to the controller
    while 'is_connected' not in quietRun('ovs-vsctl show'):
        sleep(1)
        print('.')
    print("Test pingall")
    print(type(net.hosts))
    print(type(net.links))
    # Adjacency map (node name -> neighbor names) and per-link MAC map
    global neighbors
    neighbors = {}
    global mac
    mac = {}
    f_neighbors = open('neighbors.txt', 'w')
    f_mac = open('mac.txt', 'w')
    # Seed empty entries for every host and switch
    for item in net.hosts:
        list_host = []
        host_mac = {}
        print(type(item.name))
        print(item.name)
        neighbors[item.name] = list_host
        mac[item.name] = host_mac
    for item in net.switches:
        list_switch = []
        switch_mac = {}
        print(type(type(item.name)))
        print(item.name)
        neighbors[item.name] = list_switch
        mac[item.name] = switch_mac
    dic_temp = {}
    mac_all = {}
    # Walk every link; node name is the interface name up to the '-'
    for item in net.links:
        count1 = item.intf1.name.find('-')
        count2 = item.intf2.name.find('-')
        print(item.intf1.name[:count1])
        print(item.intf2.name[:count2])
        if item.intf1.name not in dic_temp:
            dic_temp[item.intf1.name] = 1
        if item.intf2.name not in dic_temp:
            dic_temp[item.intf2.name] = 1
        node1 = item.intf1.name[:count1]
        node2 = item.intf2.name[:count2]
        neighbors[node1].append(node2)
        neighbors[node2].append(node1)
        mac[node1][node2] = item.intf1.mac
        mac[node2][node1] = item.intf2.mac
        mac_all[item.intf1.name] = item.intf1.mac
        mac_all[item.intf2.name] = item.intf2.mac
        print(item.intf1.mac)
        print(item.intf2.mac)
        print(type(item.intf1.mac))
        print(type(item.intf2.mac))
    # Persist the maps for other tools to read
    f_mac_all = open("mac_all.txt", 'w')
    for item in mac_all:
        f_mac_all.write(item + '\t' + mac_all[item] + '\t' + '\n')
    for item in neighbors:
        print(item + '\t')
        for temp in neighbors[item]:
            print(temp + '\t')
            f_neighbors.write(item + '\t' + temp + '\n')
        print('\n')
    for item in mac:
        print(item + '\t')
        for temp in mac[item]:
            print(temp + '\t')
            print(mac[item][temp] + '\n')
            f_mac.write(item + '\t' + temp + '\t' + mac[item][temp] + '\n')
        print('\n')
    f_neighbors.close()
    f_mac.close()
    """
    h0=net.hosts[0]
    for item in net.hosts[1:]:
        h0.cmdPrint('ping -Q 0x64 -c 1 '+item.IP())
    """
    net.pingAll()
    # sys.exit(0)
    # pingtest(net,count_node)
    # Generate a shell script that disables TX offload and starts a
    # tcpdump capture on every interface, then run it.
    f_temp = open('tcplook.sh', 'w')
    for item in dic_temp:
        f_temp.write('ethtool -K ' + item + ' tx off \n')
    for item in dic_temp:
        f_temp.write('sudo tcpdump -i ' + item + ' -w ' + item + '.pcap' + ' &' + '\n')
    f_temp.close()
    os.system('sh -x tcplook.sh')
    # sleep(2)
    samples = 1
    # Run the experiment events; only event 0 (traffic) is active here
    for i in range(samples):
        print(i)
        # event=random.randint(0,2)
        event = 0
        h0 = net.hosts[0]
        h1 = net.hosts[1]
        h2 = net.hosts[2]
        # h0.cmdPrint('ping -Q 0x64 -c 1 '+h1.IP())
        # sleep(3)
        if (event == 0):
            # normal(net,i)
            traffic(net, i)
        if (event == 1):
            brokenlink(net, i)
        if (event == 2):
            brokenswitch(net, i, brokennode)
        sleep(1)
    net.stop()
def main():
    """Dumbbell experiment: capture pcaps on both hosts, write an nginx
    config for tcp_info logging, and drive a 240 s iperf3 transfer while
    background services are tracked in ``services`` for cleanup.

    NOTE(review): ``time_stamp`` is computed but never used, and the
    nginx server itself is only started via the commented-out line --
    confirm intent.
    """
    # Create Topology
    topo = DumbbellTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    precise_time_str = "%m-%d-%H:%M:%S:%f"
    time_stamp = datetime.datetime.now().strftime('%m-%d-%H%M')
    # Test connectivity
    print("Dumping host connections")
    dumpNodeConnections(net.hosts)
    print("Testing network connectivity")
    net.pingAll()
    server, client = net.get('h1', 'h2')
    # One timestamped output folder per run; refuse to overwrite
    data_root = os.path.join('/', 'vagrant', 'logs', 'sanatisation',
                             datetime.datetime.now().strftime('%m-%d-%H%M'))
    if os.path.isdir(data_root):
        print('Data folder already exists. Exiting...')
        net.stop()
        sys.exit(1)
    else:
        os.mkdir(data_root)
    s_name = os.path.join(data_root, 'server.pcap')
    c_name = os.path.join(data_root, 'client.pcap')
    print('ethtool -K ' + str(server.intf()) + ' gso off')
    # Disable segment offloading for hosts
    server.cmd('ethtool -K ' + str(server.intf()) + ' gso off')
    server.cmd('ethtool --offload ' + str(server.intf()) + ' tso off')
    client.cmd('ethtool -K ' + str(client.intf()) + ' gso off')
    client.cmd('ethtool --offload ' + str(client.intf()) + ' tso off')
    # Every long-running process is appended here and terminated at exit
    services = []
    server_pcap = server.popen('tcpdump -w %s -z gzip' % s_name)
    services.append(server_pcap)
    client_pcap = client.popen('tcpdump -w %s -z gzip' % c_name)
    services.append(client_pcap)
    server_log_name = os.path.join(data_root, "nginx_access.log")
    # optionally add buffer=32k (or some other big value for access_log)
    config_str = "events { } http { log_format tcp_info '$time_local, $msec, \"$request\", $status, $tcpinfo_rtt, $tcpinfo_rttvar, \"$tcpinfo_snd_cwnd\", $tcpinfo_rcv_space, $body_bytes_sent, \"$http_referer\", \"$http_user_agent\"'; server { listen " + server.IP() + "; root /vagrant; access_log " + server_log_name + " tcp_info;} }"
    with open('nginx-conf.conf', 'w') as f:
        f.write(config_str)
    wd = str(server.cmd('pwd'))[:-2]  # strip trailing CR/LF from shell output
    # Start HTTP server
    conf_path = os.path.join(wd, 'nginx-conf.conf')
    print(conf_path)
    # Start monitoring the q_len at all routers
    # for intf in ['s1-eth1', 's1-eth2', 's2-eth1', 's2-eth2']:
    #     monitor_path = os.path.join(data_root, intf)
    #     monitor_qlen(intf, fname=monitor_path)
    # Iperf client pushes data onto the server -> server should act as client and vice versa
    iperf_server = client.popen("iperf3 -s")
    services.append(iperf_server)
    print('server started')
    # server_out = server.cmd("sudo nginx -c " + conf_path + " &")
    iperf_out_path = os.path.join(data_root, "iperf_client.json")
    # print()
    iperf_client = server.popen("iperf3 -c " + client.IP() + " -t 240 --logfile " + iperf_out_path + " -J")
    services.append(iperf_client)
    # client_proc = client.popen("wget -q --delete-after http://" + server.IP() + "/form_field_value.ibd")
    # client_proc.wait()
    iperf_client.wait()
    # Signal any watcher threads that the transfer is complete
    global done
    done = True
    # print('Starting server ' + datetime.datetime.now().strftime(precise_time_str))
    # server_proc = server.popen('iperf3 -s')
    # services.insert(0, server_proc)
    # print('Starting client ' + datetime.datetime.now().strftime(precise_time_str))
    # client_proc = client.popen('iperf3 -c ' + server.IP() + ' -t 120 --logfile ' + os.path.join(data_root, 'client.json') + ' -J')
    # client_proc.wait()
    print('Terminating... ' + datetime.datetime.now().strftime(precise_time_str))
    time.sleep(3)
    # Best-effort shutdown of everything we spawned
    for s in services:
        try:
            s.terminate()
        except OSError as e:
            print(repr(e))
    print("Output saved to: " + data_root)
    net.iperf((server, client))
    net.stop()
# NOTE(review): this chunk references names defined elsewhere in the
# file (net, last_host_that_can_move, output) -- it appears to be the
# interior of a larger script; confirm its enclosing scope.
# Switch names s1..s4
my_switches = []
for x in range (1,5):
    my_switches.append('s'+str(x))
# Hosts h21..h24 never move
my_static_hosts = []
for x in range(21, 25):
    my_static_hosts.append(net.nameToNode['h'+str(x)])
# Hosts that are allowed to move between switches
my_hosts = []
for x in range (1, last_host_that_can_move+1):
    my_hosts.append(net.nameToNode['h'+str(x)])
my_moves=[]
print "***** Baseline: Pinging all pairs"
ping_output = net.pingAll()

def show_topology():
    # Print each switch and the peer attached to every one of its ports.
    output("------- Topology -------\n")
    for s in net.switches:
        output(s.name, '<->')
        for intf in s.intfs.values():
            # connection maps an interface to a tuple whose index [1]
            # is the peer's name -- TODO confirm against MobilitySwitch
            name = s.connection.get(intf, (None, 'Unknown ') ) [ 1 ]
            output( ' %s' % name )
        output('\n')
    output('\n')

def sleep_with_dots(n):
    # NOTE(review): despite the name, no sleep call is visible here --
    # it may have been lost in formatting; confirm against the original.
    for n in range(1,n+1):
        sys.stdout.write("%d." % n)
        sys.stdout.flush()
def do_POST(self):
    """Handle a POST request that mutates the running Mininet topology.

    Dispatches on the URL prefix:
      /host   - add one host (optionally linked to a switch), restart net
      /switch - add one switch, restart net
      /link   - add a link between two existing nodes, restart net
      /new    - tear down the current network and rebuild it from the
                posted bulk description (hosts / switches / links)
      /       - merge the posted bulk description into the existing topo

    The response body reports the packet-drop percentage that the
    restart helper presumably obtains from net.pingAll() -- verify
    against restart_net, which is defined elsewhere in this file.
    Mutates the module-level globals ``topo`` and ``net``.
    """
    global topo
    global net
    # Request body is JSON; Content-length tells us how much to read.
    content_length = int(self.headers['Content-length'])
    post_data = json.loads(self.rfile.read(content_length))
    print(post_data)
    self._set_headers()
    print(self.path[0:6])
    if (self.path[0:5] == '/host'):
        print('host')
        savedTopo = topo
        try:
            # Check if host field is available
            name = post_data["name"]
            savedTopo.addHost(name)
            try:
                # Switch link specified
                switch = post_data["switch"]
                savedTopo.addLink(name, switch)
            except:
                # No switch link specified
                # NOTE(review): bare except also hides real addLink errors.
                pass
            try:
                res = restart_net(savedTopo)
            except:
                print("Error restarting network /host")
        except:
            print("No Host name specified /host")
        # NOTE(review): if the try above failed, ``name``/``res`` are
        # unbound here and this write raises NameError -- confirm intent.
        self.wfile.write(
            "Done creating host %s with %s%% of packets dropped after pingAll()"
            % (name, res))
    elif (self.path[0:7] == '/switch'):
        print('switch')
        savedTopo = topo
        try:
            name = post_data["name"]
            savedTopo.addSwitch(name)
            try:
                res = restart_net_switch(savedTopo)
            except:
                print("Error restarting network /switch")
        except:
            print("No Switch name specified")
        self.wfile.write("Done creating switch %s" % name)
    elif (self.path[0:5] == '/link'):
        print('link')
        savedTopo = topo
        try:
            # Both endpoints must be present in the posted JSON.
            link1 = post_data["link_one"]
            link2 = post_data["link_two"]
            savedTopo.addLink(link1, link2)
            try:
                res = restart_net(savedTopo)
            except:
                print("Error restarting network /link")
        except:
            print("Invalid link specification /link")
        self.wfile.write(
            "Done creating link between %s and %s, with %s%% of packets dropped after pingAll()"
            % (link1, link2, res))
    elif (self.path[0:4] == '/new'):
        # Full rebuild: stop the old network and start from an empty topo.
        net.stop()
        net = None
        topo = None
        # NOTE(review): the bound method is assigned but never called --
        # this does not actually clean up Mininet state.
        cleanup = Cleanup.cleanup
        topo = EmptyTopo()
        for node in post_data:
            if (node == 'hosts'):
                for i in post_data[node]:
                    topo.addHost(i)
            elif (node == 'switches'):
                for i in post_data[node]:
                    topo.addSwitch(i)
            elif (node == 'controllers'):
                print("not created")
            elif (node == 'links'):
                for i in post_data[node]:
                    topo.addLink(i['link_one'], i['link_two'])
            else:
                print("error")
        net = Mininet(topo, controller=OVSController, autoSetMacs=True)
        net.start()
        print("Dumping host connections")
        dumpNodeConnections(net.hosts)
        print("Testing network connectivity")
        res = net.pingAll()
        print('bulk new')
        self.wfile.write(
            "Done creating bulk data, with %s%% of packets dropped after pingAll()"
            % (res))
    elif (self.path[0:1] == '/'):
        # Bulk merge into the current topology (catch-all prefix).
        savedTopo = topo
        for node in post_data:
            if (node == 'hosts'):
                for i in post_data[node]:
                    savedTopo.addHost(i)
            elif (node == 'switches'):
                for i in post_data[node]:
                    savedTopo.addSwitch(i)
            elif (node == 'controllers'):
                print("not created")
            elif (node == 'links'):
                for i in post_data[node]:
                    savedTopo.addLink(i['link_one'], i['link_two'])
            else:
                print("error")
        res = restart_net(savedTopo)
        print('bulk')
        self.wfile.write(
            "Done creating bulk data, with %s%% of packets dropped after pingAll()"
            % (res))
    else:
        print("ERROR POSTING DATA TO %s" % self.path)
class Solar(object):
    """Create a tiered (core / spine / edge) topology from semi-scratch
    in Mininet."""

    def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3,
                 edges=2, hosts=2):
        """Create tower topology for mininet.

        The controllers are instantiated up front but the network is
        built with ``build=False`` on purpose, so switches do not
        connect immediately; each switch is attached to a controller
        explicitly after it is configured.
        """
        self.ctrls = [RemoteController(cname, addr, 6633) for addr in cips]
        self.net = Mininet(controller=RemoteController,
                           switch=OVSKernelSwitch, build=False)
        self.cips = cips
        self.spines = []
        self.leaves = []
        self.hosts = []
        for controller in self.ctrls:
            self.net.addController(controller)

        # Two core switches, cross-linked in both directions.
        core_a = self.net.addSwitch('c1', dpid='1111000000000000')
        core_b = self.net.addSwitch('c2', dpid='2222000000000000')
        self.spines.append(core_a)
        self.spines.append(core_b)
        self.net.addLink(core_a, core_b)
        self.net.addLink(core_b, core_a)

        # Every island's spine pair is dual-homed to both cores.
        for island in range(1, islands + 1):
            clump = self.createSpineClump(island, edges, hosts)
            for spine in clump:
                self.net.addLink(core_a, spine)
                self.net.addLink(core_b, spine)

    def createSpineClump(self, island, edges, hosts):
        """Create one island: a pair of spine switches plus its edge
        switches and hosts.  Returns the two spines."""
        spine_a = self.net.addSwitch('s%1d1' % island,
                                     dpid='00000%1d0100000000' % island)
        spine_b = self.net.addSwitch('s%1d2' % island,
                                     dpid='00000%1d0200000000' % island)
        # Spines are cross-linked in both directions, like the cores.
        self.net.addLink(spine_a, spine_b)
        self.net.addLink(spine_b, spine_a)
        for edge_idx in range(1, edges + 1):
            edge_sw = self.createEdgeSwitch(island, edge_idx, hosts)
            self.net.addLink(edge_sw, spine_a)
            self.net.addLink(edge_sw, spine_b)
        self.spines.append(spine_a)
        self.spines.append(spine_b)
        return [spine_a, spine_b]

    def createEdgeSwitch(self, island, index, hosts):
        """Create an edge switch in an island and attach its hosts."""
        edge = self.net.addSwitch('e%1d%1d' % (island, index),
                                  dpid='0000000%1d0000000%1d' % (island, index))
        self.leaves.append(edge)
        for j in range(1, hosts + 1):
            node = self.net.addHost('h%d%d%d' % (island, index, j),
                                    ip='10.%d.%d.%d' % (island, index, j))
            self.net.addLink(node, edge)
            self.hosts.append(node)
        return edge

    def run(self):
        """Build and start the network, then drop into the custom CLI;
        tear everything down when the CLI exits."""
        self.net.build()
        self.net.start()
        CustomCLI(self.net)
        self.net.stop()

    def pingAll(self):
        """PingAll to create flows - for unit testing."""
        self.net.pingAll()

    def stop(self):
        """Stops the topology. You should call this after run_silent."""
        self.net.stop()
def perfTest():
    "Create the network and run the performance test"
    """Note: to exercise the other available topologies, the ``topo``
    and ``test`` parameters below must be changed for each topology
    type to be tested."""
    topo = LeafSpine(n=4)
    test = 'LeafSpine'
    run_test = 2  # 1 runs the Iperf bandwidth test, 2 runs the ping test
    net = Mininet(topo=topo, controller=RemoteController, link=TCLink,
                  ipBase='172.16.0.0/16')
    net.start()
    seconds = 100  # how long monitorFiles() watches the ping output files
    net.waitConnected()
    print "Espara a arquitetura convergir"
    net.pingAll()
    host = {}
    print "Comeca os testes"
    # Collect handles for the first 100 hosts; the branch only selects
    # which banner is printed -- the lookup logic is identical.
    # NOTE(review): if ``test`` matches none of these branches (e.g.
    # 'DcellNoLoop'), ``max_host`` is never bound and the code below
    # raises NameError -- confirm intended topology names.
    if (test == 'Fattree' or test == 'FatTreeNoLink' or test == 'FatTreeTopotest'):
        max_host = 100
        for y in range(0, max_host):
            host_name = 'h' + str(y)
            host[y] = net.get(host_name)
    elif (test == 'LeafSpine' or test == 'LeafSpinenolink' or
          test == 'Facebook4post' or test == 'FacebookNewFabric'):
        print "*** Testes arquiteturas baseadas em Leaf Spine ***"
        max_host = 100
        for y in range(0, max_host):
            host_name = 'h' + str(y)
            host[y] = net.get(host_name)
    elif (test == 'HybridFattree' or test == 'HybridFattreenolink'):
        print "***Testes com Hybrid Fattree"
        max_host = 100
        for x in range(0, max_host):
            host_name = 'h' + str(x)
            print "Adding %s" % host_name
            host[x] = net.get(host_name)
    if (run_test == 1):
        # Bandwidth test: pair host x with host (max_host-1-x), run an
        # iperf in a background thread per pair, then swap directions.
        print " Teste com IPERF largura de banda"
        if ((max_host % 2) == 0):
            for x in range(0, (max_host / 2)):
                src = host[x]
                dst = host[(max_host - 1) - x]
                thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
            for x in range(0, (max_host / 2)):
                dst = host[x]
                src = host[(max_host - 1) - x]
                thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
        else:
            # Odd host count: hand-picked pairs, forward direction first.
            dst = host[0]
            src = host[5]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[1]
            src = host[10]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[2]
            src = host[15]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[3]
            src = host[20]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[6]
            src = host[11]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[7]
            src = host[16]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[8]
            src = host[21]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[12]
            src = host[17]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[13]
            src = host[22]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[18]
            src = host[23]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
            # Same pairs, reverse direction.
            src = host[0]
            dst = host[5]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[1]
            dst = host[10]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[2]
            dst = host[15]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[3]
            dst = host[20]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[6]
            dst = host[11]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[7]
            dst = host[16]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[8]
            dst = host[21]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[12]
            dst = host[17]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[13]
            dst = host[22]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[18]
            dst = host[23]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
    elif (run_test == 2 and test == 'DcellNoLoop'):
        # Latency test with near-MTU packets: each host pings the host
        # 5 positions ahead; the last 5 hosts wrap around to the start.
        print "Testes com Ping latencia"
        outfiles, errfiles = {}, {}
        packetsize = 1454  # max packet size 1472. MTU set to 1500
        bottom = 0
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings
            if (h < max_host - 5):
                host[h].cmdPrint('ping', host[h + 5].IP(),
                                 '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[bottom].IP(),
                                 '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
                bottom = bottom + 1
        print "Monitora a saida dos resultados para", seconds, "segundos ou milesegundos"
        # NOTE(review): ``for host[h], line`` reassigns the dict entry
        # for the last loop index on every iteration -- works, but
        # verify this mutation of ``host`` is intentional.
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        sleep(11)
    elif (run_test == 2 and test != 'HybridFatTreeNoLink'):
        # Latency test with small packets: each host pings its neighbor;
        # the last host wraps around to host 0.
        print "teste com Ping latencia especifico"
        outfiles, errfiles = {}, {}
        packetsize = 54
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings
            if (h < max_host - 1):
                host[h].cmdPrint('ping', host[h + 1].IP(),
                                 '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[0].IP(),
                                 '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
        print "Monitora a saida dos resultados para ", seconds, "segundos ou milisegundos"
        # NOTE(review): output file is never closed -- rely on process
        # exit, or consider f.close() / with-statement in a later fix.
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        sleep(11)
    print "Final dos testes"
    net.stop()
def main(duration_sec, delay_sec, delay_ms, cc_alg, results_path,
         interactive=False, use_linux_router=True, use_asym=False):
    """Run a two-flow iperf congestion-control experiment on a dumbbell.

    duration_sec  -- length of the first sender's iperf run
    delay_sec     -- start offset of the second sender (it also runs
                     delay_sec less)
    delay_ms      -- one-way link delay handed to DumbbellTopo
    cc_alg        -- TCP congestion-control algorithm passed to iperf -Z
    results_path  -- file tcp_probe output is dd'd into
    interactive   -- drop into the Mininet CLI instead of the experiment
    use_linux_router / use_asym -- topology options, see DumbbellTopo

    Requires root (modprobe, tcp_probe).  The network is always stopped
    and tcp_probe unloaded, even on failure.
    """
    topo = DumbbellTopo(delay_ms=delay_ms, use_linux_router=use_linux_router,
                        use_asym=use_asym)
    net = Mininet(topo=topo, link=TCLink, autoStaticArp=True)
    net.start()
    try:
        if use_asym:
            # Update our access router interfaces to limit their transmit
            # speeds to only 252 Mbps. Note that this has to come after we
            # start the network because during testing it seemed that
            # net.start() reloaded the original bandwidth that was set when
            # the associated link was first created.
            ars = (net["ar1"], net["ar2"])
            ar_neighbors = ((net["s1"], net["s2"], net["bb1"]),
                            (net["r1"], net["r2"], net["bb2"]))
            for ar, neighbors in izip(ars, ar_neighbors):
                for neighbor in neighbors:
                    # connectionsTo returns a list of pairs where the first
                    # item is the interface on self and the second is the
                    # interface on the node passed to connectionsTo.
                    #
                    # According to the NIST study, bandwidth on the access
                    # router interfaces is 252 Mbps and queue size is 20% of
                    # bandwidth delay product.
                    ar_iface, _ = ar.connectionsTo(neighbor)[0]
                    ar_iface.config(
                        bw=DumbbellTopo.ACCESS_ROUTER_BANDWIDTH_MBPS,
                        max_queue_size=topo.access_router_queue_size)
        # TCLink ignores any sort of IP address we might specify for
        # non-default interfaces when we are creating links via Topo.addLink.
        # This reassigns the addresses we want for those interfaces on our
        # backbone routers. We also add routing rules to each backbone router
        # so that each router can forward traffic to the subnet they are not
        # directly connected to.
        if use_linux_router:
            net["bb1"].intf("bb1-eth1").setIP("10.0.1.1/24")
            net["bb2"].intf("bb2-eth1").setIP("10.0.2.1/24")
            net["bb1"].cmd(
                "route add -net 10.0.2.0 netmask 255.255.255.0 gw 10.0.0.2 dev bb1-eth0" )
            net["bb2"].cmd(
                "route add -net 10.0.1.0 netmask 255.255.255.0 gw 10.0.0.1 dev bb2-eth0" )
        info("Dumping host connections\n")
        dumpNodeConnections(net.hosts)
        info("Dumping net connections\n")
        dumpNetConnections(net)
        # Get rid of initial delay in network.
        net.pingAll()
        if interactive:
            CLI(net)
        else:
            # Restart tcp_probe so the capture starts clean.
            print "Restarting tcp_probe"
            subprocess.call('modprobe -r tcp_probe', shell=True)
            subprocess.call('modprobe tcp_probe full=1', shell=True)
            read_tcp_probe_command = 'dd if=/proc/net/tcpprobe of=%s' % results_path
            subprocess.call('%s &' % read_tcp_probe_command, shell=True)
            try:
                # Run one iperf stream between r1 and s1 and another
                # between r2 and s2.
                print "Running iperf tests"
                print "Sender 1 duration: %d" % duration_sec
                print "Sender 2 duration: %d" % (duration_sec - delay_sec)
                print "Sender 2 delay: %d" % delay_sec
                r1_output = "r1-output-%d-%s.txt" % (delay_ms, cc_alg)
                r2_output = "r2-output-%d-%s.txt" % (delay_ms, cc_alg)
                s1_output = "s1-output-%d-%s.txt" % (delay_ms, cc_alg)
                s2_output = "s2-output-%d-%s.txt" % (delay_ms, cc_alg)
                # 1500 == MTU.
                #iperf_window = DumbbellTopo.ACCESS_ROUTER_BANDWIDTH_PPMS * delay_ms * 1500
                iperf_window = DumbbellTopo.HOST_BANDWIDTH_PPMS * delay_ms * 1500
                print "Iperf window (bytes): %d" % iperf_window
                net["r1"].sendCmd('iperf -s -p 5001 -w %d &> %s'
                                  % (iperf_window, r1_output))
                net["r2"].sendCmd('iperf -s -p 5002 -w %d &> %s'
                                  % (iperf_window, r2_output))
                net["s1"].sendCmd(
                    'iperf -c %s -p 5001 -i 1 -w %d -t %d -Z %s &> %s'
                    % (net["r1"].IP(), iperf_window, duration_sec, cc_alg,
                       s1_output))
                # Delay the second sender by a certain amount and then start it.
                time.sleep(delay_sec)
                net["s2"].sendCmd(
                    'iperf -c %s -p 5002 -i 1 -w %d -t %d -Z %s &> %s'
                    % (net["r2"].IP(), iperf_window, duration_sec - delay_sec,
                       cc_alg, s2_output))
                # Wait for all iperfs to close. On server side, we need to
                # send a sentinel to output for waitOutput to return.
                net["s2"].waitOutput()
                net["s1"].waitOutput()
                net["r2"].sendInt()
                net["r2"].waitOutput()
                net["r1"].sendInt()
                net["r1"].waitOutput()
                print "Completed iperf tests"
            finally:
                # Stop tcp_probe.
                print "Stopping tcp_probe"
                subprocess.call('pkill -f "%s"' % read_tcp_probe_command,
                                shell=True)
                subprocess.call('modprobe -r tcp_probe', shell=True)
    finally:
        net.stop()
def run_exercise():
    """Build the DDoS exercise network, pre-install static flows so L2
    learning cannot interfere, start the DNS server and terminals, and
    clean up when the operator presses Enter."""
    # Create and start a new network with our custom topology
    topo = DDoSTopo()
    net = Mininet(topo=topo)
    net.start()
    # Configure switch so that packets reach the right port (to prevent
    # l2 learning from affecting the exercise).  Every (ingress port,
    # destination host) combination forwards to the host's port and
    # mirrors a VLAN-tagged copy out of port 4.
    net["s1"].dpctl("del-flows")
    for in_port, vlan in ((1, 11), (2, 12), (3, 13)):
        for last_octet, out_port in ((1, 1), (2, 2), (3, 3)):
            net["s1"].dpctl(
                "add-flow",
                "dl_type=0x0800,in_port=%d,nw_dst=10.0.0.%d,"
                "actions=output:%d,mod_vlan_vid:%d,output:4"
                % (in_port, last_octet, out_port, vlan))
    # Verify connectivity
    net.pingAll()
    processes = []
    # Start BIND DNS-server
    processes.append(net["B"].popen(
        'named', '-g', '-c',
        '/home/vagrant/assignments/DNS/config/named.conf'))
    # Open terminals
    processes.append(makeTerms([net["A"]], title="Attacker terminal")[0])
    processes.append(makeTerms([net["D"]], title="Capture terminal")[0])
    raw_input("Press Enter to exit....")
    for proc in processes:
        proc.kill()
    Cleanup.cleanup()
def bufferbloat(): if not os.path.exists(args.dir): os.makedirs(args.dir) os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong) # Cleanup any leftovers from previous mininet runs cleanup() topo = BBTopo() net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() # This dumps the topology and how nodes are interconnected through # links. dumpNodeConnections(net.hosts) # This performs a basic all pairs ping test. net.pingAll() # Start all the monitoring processes start_tcpprobe("cwnd.txt") # TODO: Start monitoring the queue sizes. Since the switch I # created is "s0", I monitor one of the interfaces. Which # interface? # The interface numbering starts with 1 and increases. # Depending on the order you add links to your network, this # number may be 1 or 2. Ensure you use the correct number. # qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % args.dir) # TODO: Start iperf, webservers, etc. iperf_proc = Process(target=start_iperf, args=(net, )) ping_proc = Process(target=start_ping, args=(net, )) iperf_proc.start() ping_proc.start() start_webserver(net) # Hint: The command below invokes a CLI which you can use to # debug. It allows you to run arbitrary commands inside your # emulated hosts h1 and h2. # # CLI(net) # TODO: measure the time it takes to complete webpage transfer # from h1 to h2 (say) 3 times. # Hint: check what the following # command does: curl -o /dev/null -s -w %{time_total} google.com # Now use the curl command to fetch webpage from the webserver you # spawned on host h1 (not from google!) # Hint: have a separate function to do this and you may find the # loop below useful. measurements = [] start_time = time() h1 = net.get("h1") h2 = net.get("h2") while True: # do the measurement (say) 3 times. measurements.append(get_timings(net, h1, h2)) sleep(1) now = time() delta = now - start_time if delta > args.time: break print "%.1fs left..." 
% (args.time - delta) # TODO: compute average (and standard deviation) of the fetch times. # You don't need to plot them. Just note it in your # README and explain. print "Writing results..." f = open("./results.txt", "w+") f.write("average: %s \n" % mean(measurements)) f.write("std dev: %s \n" % stdev(measurements)) f.close() stop_tcpprobe() if qmon is not None: qmon.terminate() net.stop() # Ensure that all processes you create within Mininet are killed. # Sometimes they require manual killing. Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def perfTest(argv): type = '' numHosts = 2 depth = 2 fanout = 2 try: opts, args = getopt.getopt(argv, "ht:n:d:f:", ["type=", "hosts=", "depth=", "fanout="]) except getopt.GetoptError: print 'topo_mininet.py -t <topo> -n <hosts>' print 'topo_mininet.py -t <topo> -d <depth> -f <fanout>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'topo_mininet.py -t <topo> -h <hosts>' print 'topo_mininet.py -t <topo> -d <depth> -f <fanout>' sys.exit() elif opt in ("-t", "--type"): type = arg elif opt in ("-n", "--hosts"): numHosts = arg elif opt in ("-d", "--depth"): depth = arg elif opt in ("-f", "--fanout"): fanout = arg print 'type - ', type if type == 'tree': print 'depth - ', depth print 'fanout - ', fanout else: print 'hosts - ', numHosts "Create network and run simple performance test" if type == 'linear': print 'generating linear topology' topo = LinearTopo(k=int(numHosts)) elif type == 'single': print 'generating single switch topology' topo = SingleSwitchTopo(k=int(numHosts)) elif type == 'tree': print 'generating tree topology' topo = TreeTopo(depth=int(depth), fanout=int(fanout)) else: print 'invalid topology' print 'valid topology: single, linear or tree' sys.exit(2) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() print "Testing bandwidth between h1 and h4" h1, h4 = net.get('h1', 'h4') net.iperf((h1, h4)) net.stop()
def MininetTopo(): ''' Prepare Your Topology ''' net = Mininet (topo=None, link=TCLink, build=False) c0 = net.addController(name='c0', controller=RemoteController, ip=REMOTE_CONTROLLER_IP, port=6633) info("Create Host node\n") h1 = net.addHost('h1', mac='00:00:00:00:00:01', ip='10.0.0.1') h2 = net.addHost('h2', mac='00:00:00:00:00:02', ip='10.0.0.2') h3 = net.addHost('h3', mac='00:00:00:00:00:03', ip='10.0.0.3') h4 = net.addHost('h4', mac='00:00:00:00:00:04', ip='10.0.0.4') h5 = net.addHost('h5', mac='00:00:00:00:00:05', ip='10.0.0.5') h6 = net.addHost('h6', mac='00:00:00:00:00:06', ip='10.0.0.6') info("Create Switch node\n") s1 = net.addSwitch('s1', protocols='OpenFlow13') s2 = net.addSwitch('s2', protocols='OpenFlow13') s3 = net.addSwitch('s3', protocols='OpenFlow13') s4 = net.addSwitch('s4', protocols='OpenFlow13') info("Link switch to host\n") # net.addLink(s1, h1, 4, bw=BW) # net.addLink(s1, h2, 5, bw=BW) # net.addLink(s1, h3, 6, bw=BW) # net.addLink(s3, h4, 4, bw=BW) # net.addLink(s3, h5, 5, bw=BW) # net.addLink(s3, h6, 6, bw=BW) # net.addLink(s1, s2, 1, 1, bw=BW) # net.addLink(s2, s3, 2, 1, bw=BW) # net.addLink(s3, s4, 2, 2, bw=BW) # net.addLink(s4, s1, 1, 2, bw=BW) # net.addLink(s1, s3, 3, 3, bw=BW) # net.addLink(s2, s4, 3, 3, bw=BW) # net.addLink(s1, h1, 4, bw=BW) # net.addLink(s1, h2, 5, bw=BW) # net.addLink(s1, h3, 6, bw=BW) # net.addLink(s3, h4, 4, bw=BW) # net.addLink(s3, h5, 5, bw=BW) # net.addLink(s3, h6, 6, bw=BW) net.addLink(s1, h1, 4) net.addLink(s1, h2, 5) net.addLink(s1, h3, 6) net.addLink(s3, h4, 4) net.addLink(s3, h5, 5) net.addLink(s3, h6, 6) net.addLink(s1, s2, 1, 1) net.addLink(s2, s3, 2, 1) net.addLink(s3, s4, 2, 2) net.addLink(s4, s1, 1, 2) net.addLink(s1, s3, 3, 3) net.addLink(s2, s4, 3, 3) ''' Working your topology ''' info("Start network\n") net.build() c0.start() s1.start( [c0] ) s2.start( [c0] ) s3.start( [c0] ) s4.start( [c0] ) # info("Start xterm\n") # net.terms.append(makeTerm(c0)) info("Dumping host connections\n") 
dumpNodeConnections(net.hosts) h1.cmd("ping -c 1 10.0.0.6") h2.cmd("ping -c 1 10.0.0.4") h3.cmd("ping -c 1 10.0.0.5") h6.cmd("ping -c 1 10.0.0.1") h5.cmd("ping -c 1 10.0.0.3") h4.cmd("ping -c 1 10.0.0.2") h1.cmd("ping -c 1 10.0.0.6") sleep(2) print "Testing network connectivity" net.pingAll() print "Testing wget" h1.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") h2.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") h3.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") h4.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") h5.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") h6.cmd("echo 1 > /proc/sys/net/ipv4/tcp_ecn") sleep(3) h1.cmd("python -m SimpleHTTPServer 5001 &") h1.cmd("python -m SimpleHTTPServer 5002 &") h1.cmd("python -m SimpleHTTPServer 5003 &") h1.cmd("python -m SimpleHTTPServer 5004 &") h2.cmd("python -m SimpleHTTPServer 5001 &") h2.cmd("python -m SimpleHTTPServer 5002 &") h2.cmd("python -m SimpleHTTPServer 5003 &") h2.cmd("python -m SimpleHTTPServer 5004 &") h3.cmd("python -m SimpleHTTPServer 5001 &") h3.cmd("python -m SimpleHTTPServer 5002 &") h3.cmd("python -m SimpleHTTPServer 5003 &") h3.cmd("python -m SimpleHTTPServer 5004 &") sleep(1) print "wget from h1 to h4 via port 5001" h4.cmd("wget --limit-rate=3.125m http://10.0.0.1:5001/625M.log -o h4_tcp_p5001.txt &") sleep(10) print "wget from h2 to h5 via port 5001" h5.cmd("wget --limit-rate=3.125m http://10.0.0.2:5001/625M.log -o h5_tcp_p5001.txt &") sleep(10) print "wget from h3 to h6 via port 5001" h6.cmd("wget --limit-rate=3.125m http://10.0.0.3:5001/625M.log -o h6_tcp_p5001.txt &") sleep(10) print "wget from h1 to h4 via port 5002" h4.cmd("wget --limit-rate=3.125m http://10.0.0.1:5002/625M.log -o h4_tcp_p5002.txt &") sleep(10) print "wget from h2 to h5 via port 5002" h5.cmd("wget --limit-rate=3.125m http://10.0.0.2:5002/625M.log -o h5_tcp_p5002.txt &") sleep(10) print "wget from h3 to h6 via port 5002" h6.cmd("wget --limit-rate=3.125m http://10.0.0.3:5002/625M.log -o h6_tcp_p5002.txt &") sleep(10) print "wget from h1 to h4 via port 
5003" h4.cmd("wget --limit-rate=3.125m http://10.0.0.1:5003/625M.log -o h4_tcp_p5003.txt &") sleep(10) print "wget from h2 to h5 via port 5003" h5.cmd("wget --limit-rate=3.125m http://10.0.0.2:5003/625M.log -o h5_tcp_p5003.txt &") sleep(10) print "wget from h3 to h6 via port 5003" h6.cmd("wget --limit-rate=3.125m http://10.0.0.3:5003/625M.log -o h6_tcp_p5003.txt &") sleep(10) print "wget from h1 to h4 via port 5004" h4.cmd("wget --limit-rate=3.125m http://10.0.0.1:5004/625M.log -o h4_tcp_p5004.txt &") sleep(10) print "wget from h2 to h5 via port 5004" h5.cmd("wget --limit-rate=3.125m http://10.0.0.2:5004/625M.log -o h5_tcp_p5004.txt &") sleep(10) print "wget from h3 to h6 via port 5004" h6.cmd("wget --limit-rate=3.125m http://10.0.0.3:5004/625M.log -o h6_tcp_p5004.txt &") # print "iperf from h1 to h4" # h1.cmd("iperf -c 10.0.0.4 -t 3600 -i 5 -p 5001 &") # sleep(10) # print "iperf from h2 to h5" # h2.cmd("iperf -c 10.0.0.5 -t 3600 -i 5 -p 5001 &") # sleep(10) # print "iperf from h3 to h6" # h3.cmd("iperf -c 10.0.0.6 -t 3600 -i 5 -p 5001 &") # sleep(10) # print "iperf udp from h1 to h4" # h1.cmd("sh udp_to_6.sh &") # sleep(10) # print "iperf udp from h2 to h5" # h2.cmd("sh udp_to_4.sh &") # sleep(10) # print "iperf udp from h3 to h6" # h3.cmd("sh udp_to_5.sh &") CLI(net) ''' Clean mininet ''' net.stop()
def bufferbloat(): if not os.path.exists(args.dir): os.makedirs(args.dir) os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong) topo = BBTopo() net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() # This dumps the topology and how nodes are interconnected through # links. dumpNodeConnections(net.hosts) # This performs a basic all pairs ping test. net.pingAll() # Start all the monitoring processes start_tcpprobe("cwnd.txt") # TODO: Start monitoring the queue sizes. Since the switch I # created is "s0", I monitor one of the interfaces. Which # interface? The interface numbering starts with 1 and increases. # Depending on the order you add links to your network, this # number may be 1 or 2. Ensure you use the correct number. qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir)) # TODO: Start iperf, webservers, etc. # start_iperf(net) start_iperf(net) start_ping(net) start_webserver(net) # TODO: measure the time it takes to complete webpage transfer # from h1 to h2 (say) 3 times. Hint: check what the following # command does: curl -o /dev/null -s -w %{time_total} google.com # Now use the curl command to fetch webpage from the webserver you # spawned on host h1 (not from google!) # Hint: have a separate function to do this and you may find the # loop below useful. start_time = time() download_times = [] while True: # do the measurement (say) 3 times. download_times.append(download_webpage(net)) sleep(5) now = time() delta = now - start_time if delta > args.time: break print "%.1fs left..." % (args.time - delta) # TODO: compute average (and standard deviation) of the fetch # times. You don't need to plot them. Just note it in your # README and explain. 
download_times = map(float, download_times) print download_times average = average(download_times) sd = standard_deviation(download_times, average) print "average: %s" % (average) print "standard deviation: %s" % (sd) # Hint: The command below invokes a CLI which you can use to # debug. It allows you to run arbitrary commands inside your # emulated hosts h1 iand h2. # CLI(net) stop_tcpprobe() qmon.terminate() net.stop() # Ensure that all processes you create within Mininet are killed. # Sometimes they require manual killing. Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def mainTopo():
    """Build an 8-client / 8-server star around one Linux router and run
    the bufferbloat 'Scenario 4' experiment.

    Clients Cl<i> live on 192.168.<i>.0/24 behind 100 Mbit links;
    servers Se<i> live on 192.168.<i*10>.0/24 behind BW-Mbit bottleneck
    links.  The long-lived phase runs per-client tcpdump + iperf + ping
    against each client's own server; the short-lived phase fetches a
    page from each server with wget.  Results are written under
    dataResult/examine6/ and /home/reghn/Documents/pcapngs/, tagged
    <cc-algorithm>_<maxQ>_<BW>.
    """
    bar_wide = '==========================================================================='
    bar = '========================================================================='
    os.system('mn -c')  # clear any stale Mininet state first
    print(bar_wide)
    print(bar_wide)
    net = Mininet(link=TCLink, host=CPULimitedHost)
    net.start()
    # Hosts: 8 clients, 8 servers, one router.  The link-creation order
    # below fixes the router's interface numbering (eth0-7 -> clients,
    # eth8-15 -> servers), so do not reorder these loops.
    clients = [net.addHost('Cl%d' % i, ip='192.168.%d.2/24' % i)
               for i in range(1, 9)]
    servers = [net.addHost('Se%d' % i, ip='192.168.%d.2/24' % (10 * i))
               for i in range(1, 9)]
    ro1 = net.addHost('Router1')
    for cl in clients:
        net.addLink(cl, ro1, bw=100)
    for se in servers:
        # Bottleneck links; max_queue_size deliberately left at default.
        net.addLink(se, ro1, bw=BW)
    net.build()
    print(bar_wide)
    print(bar_wide)
    # Flush the auto-assigned addresses on all 16 router ports, then
    # bring them up.  (BUG FIX: the original had a typo 'Router1-et11'
    # which left eth11 unflushed.)
    for n in range(16):
        ro1.cmd('ifconfig Router1-eth%d 0' % n)
    for n in range(16):
        ro1.cmd('ifconfig Router1-eth%d' % n)
    # Gateway addresses: client subnets on eth0-7, server subnets on
    # eth8-15.
    for n in range(8):
        ro1.cmd("ip addr add 192.168.%d.1/24 brd + dev Router1-eth%d"
                % (n + 1, n))
    for n in range(8):
        ro1.cmd("ip addr add 192.168.%d.1/24 brd + dev Router1-eth%d"
                % (10 * (n + 1), n + 8))
    # Let Router1 forward between its subnets.
    ro1.cmd('sysctl -w net.ipv4.ip_forward=1')
    # Every end host routes via its own subnet's gateway.
    for i, cl in enumerate(clients, 1):
        cl.cmd('ip route add default via 192.168.%d.1' % i)
    for i, se in enumerate(servers, 1):
        se.cmd('ip route add default via 192.168.%d.1' % (10 * i))
    print('=================STARTING SCENARIO 4 (Bufferbloat 8 Device)=======================')
    print(bar_wide)
    # Show the congestion-control algorithm in effect on both sides.
    clients[0].cmdPrint('sysctl net.ipv4.tcp_congestion_control')
    servers[1].cmdPrint('sysctl net.ipv4.tcp_congestion_control')
    print("QUEUE_SIZE", maxQ)
    print("LINK_BANDWIDTH, RO-SE ", BW)
    # File tag: <cc-algorithm>_<maxQ>_<BW>.
    ccName = subprocess.check_output(
        'cat /proc/sys/net/ipv4/tcp_congestion_control', shell=True)
    ccName = ccName.replace("\n", "")
    tag = str(ccName) + '_' + str(maxQ) + '_' + str(BW)
    print(bar_wide)
    net.pingAll()
    print(bar_wide)
    # One iperf server per Se host, each logging to its own file.
    for i, se in enumerate(servers, 1):
        se.cmd('iperf -s > dataResult/examine6/%s_LL_iperf-server%d.txt &'
               % (tag, i))
    print(' Server Iperf Started')
    # Simple HTTP servers provide the short-lived (wget) workload.
    for se in servers:
        se.cmd('python -m SimpleHTTPServer &')
    print(bar)
    #### STARTING EXAMINE ####
    # Long-lived phase: per client, capture traffic and run iperf + ping
    # against its own server.
    print(' TCPDUMP Started Longlived for 65 s Please Wait')
    print(' Iperf Started')
    for i, cl in enumerate(clients, 1):
        cl.cmd('tcpdump -G 35 -W 1 -w /home/reghn/Documents/pcapngs/%d_LL_.pcapng -i Cl%d-eth0 &' % (i, i))
        cl.cmd('iperf -c 192.168.%d.2 -t 30 -i 1 > dataResult/examine6/%d_LL_iperfRests.txt &' % (10 * i, i))
        cl.cmd('ping 192.168.%d.2 -c 30 > dataResult/examine6/%d_LL_rttRests.txt & ' % (10 * i, i))
    time.sleep(2)
    # Rename the per-client result files with the experiment tag.
    for i in range(1, 9):
        os.system('mv /home/reghn/Documents/pcapngs/%d_LL_.pcapng /home/reghn/Documents/pcapngs/%s_LL_CL%d.pcapng' % (i, tag, i))
        os.system('mv dataResult/examine6/%d_LL_iperfRests.txt dataResult/examine6/%s_LL_iperfRests_Cl%d.txt' % (i, tag, i))
        os.system('mv dataResult/examine6/%d_LL_rttRests.txt dataResult/examine6/%s_LL_rttRests._Cl%d.txt' % (i, tag, i))
    print(bar)
    print(' Python HTTP Server Start')
    print(bar)
    os.system('echo Shortlived Started for 10 s Please Wait')
    # Short-lived phase: each client fetches a page from its own server.
    for i, cl in enumerate(clients, 1):
        cl.cmdPrint('wget -q 192.168.%d.2:8000 &' % (10 * i))
    print(bar)
    print(" Processing all file's ")
    print(bar)
    time.sleep(40)  # let captures and transfers finish before teardown
    net.stop()
def tcpfair():
    """Run the TCP fairness experiment.

    Builds a star topology with an optional RED/ECN bottleneck, starts
    receiver and sender flows, lets them run for args.time seconds
    while probing cwnd, optionally records the median bottleneck rate
    for marking-threshold calibration, and dumps per-sender netstat
    statistics.
    """
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    os.system("sudo sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)
    # RED parameters from the command line; anything not overridden here
    # falls back to the defaults in the Mininet link code.
    red_settings = {
        'limit': args.red_limit,
        'min': args.red_min,
        'max': args.red_max,
        'avpkt': args.red_avpkt,
        'burst': args.red_burst,
        'prob': args.red_prob,
    }
    # Instantiate the topology using the required parameters.
    topo = StarTopo(n=args.hosts, bw_host=args.bw_host,
                    delay='%sms' % args.delay,
                    bw_net=args.bw_net, maxq=args.maxq,
                    enable_ecn=args.ecn, enable_red=args.red,
                    red_params=red_settings,
                    show_mininet_commands=0)
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
                  autoPinCpus=True)
    net.start()
    # Show the wiring, then a basic all-pairs connectivity check.
    dumpNodeConnections(net.hosts)
    net.pingAll()
    iface = "s0-eth1"  # bottleneck interface on the switch
    set_red(iface, red_settings)
    os.system("tc -d qdisc show dev %s" % iface)
    # Capture (truncated) packets on the bottleneck for up to 100s.
    os.system("sudo dumpcap -s 90 -i %s -a duration:100 &" % iface)
    start_receiver(net)
    start_senders(net, args.cong1)
    sleep(5)
    sleep(20)  # let the flows stabilize before measuring
    # Monitor congestion windows for the duration of the run.
    start_tcpprobe("cwnd.txt")
    # Run the experiment for the specified time.
    start_time = time()
    while time() - start_time <= args.time:
        sleep(1)
    # When calibrating marking thresholds, record the median bottleneck
    # rate against the configured threshold.
    if args.mark_threshold:
        rates = get_rates(iface='s0-eth1',
                          nsamples=CALIBRATION_SAMPLES + CALIBRATION_SKIP)
        rates = rates[CALIBRATION_SKIP:]  # drop warm-up samples
        reference_rate = median(rates)
        if reference_rate > 20:
            # `with` closes the file; the original's explicit close()
            # inside the block was redundant.
            with open(args.dir + "/k.txt", "a") as myfile:
                myfile.write(str(args.mark_threshold) + ",")
                myfile.write(str(reference_rate))
                myfile.write("\n")
    stop_tcpprobe()
    # Dump TCP statistics for each sender host.
    for i in range(args.hosts - 1):
        hn = net.getNodeByName('h%d' % (i + 1))
        with open(args.dir + "/netstat-h%d.txt" % (i + 1), "w") as outfile:
            hn.popen("netstat -s", stdout=outfile, shell=True)
    net.stop()
    # Ensure all processes created within Mininet are killed; some
    # require manual cleanup.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def simpleTest():
    """Create and test a simple network.

    Builds a single-switch topology under a remote controller, wires up
    the SFC/NSF demo services (SFF, firewall, DPI, security-controller
    web server, packet receivers) and drops into the Mininet CLI.
    (The original placed this docstring after the first statement,
    where it is a no-op string expression, not a docstring.)
    """
    # Load the database schema used by the security controller.
    os.system("sudo mysql -u root -p mysql < ./schema.sql")
    topo = SingleSwitchTopo(n=4)
    net = Mininet(topo, controller=partial(RemoteController,
                                           ip='127.0.0.1', port=6633))
    net.start()
    net.pingAll()
    # Initialize components.  NOTE(review): these node names assume a
    # project-local SingleSwitchTopo that defines them -- confirm.
    sff = net.get('sff1')
    firewall = net.get('firewall')
    admin = net.get('admin')
    facebook = net.get('facebook')
    google = net.get('google')
    naver = net.get('naver')
    instagram = net.get('instagram')
    dpi = net.get('dpi')
    staff_1 = net.get('staff_1')
    staff_2 = net.get('staff_2')
    manager = net.get('manager')
    president = net.get('president')
    pkt_gen = net.get('pkt_gen')
    # Service Function Forwarder.
    sff.cmd('../bin/sff sff1-eth0 > /tmp/sff.out &')
    # Network security functions: firewall and DPI.
    firewall.cmd('cd ../NSF/Firewall; sudo make init')
    firewall.cmd('secu')
    firewall.cmd('sudo ../../bin/firewall firewall-eth0 > /tmp/firewall.out &')
    dpi.cmd('cd ../NSF/DPI; sudo make init')
    dpi.cmd('secu')
    dpi.cmd('sudo ../../bin/dpi dpi-eth0 > /tmp/dpi.out &')
    # Security controller web front-end (restart apache, then server.py).
    admin.cmd('cd ../SecurityController')
    admin.cmd('sudo service apache2 stop >> /tmp/webserver.out')
    admin.cmd('sudo service apache2 start >> /tmp/webserver.out')
    admin.cmd('sudo python server.py >> /tmp/webserver.out &')
    # Packet receivers on every endpoint, in order to check flow rules.
    facebook.cmd('../bin/ipPacketReceiver > /tmp/facebook.out &')
    google.cmd('../bin/ipPacketReceiver > /tmp/google.out &')
    naver.cmd('../bin/ipPacketReceiver > /tmp/naver.out &')
    instagram.cmd('../bin/ipPacketReceiver > /tmp/instagram.out &')
    staff_1.cmd('../bin/ipPacketReceiver > /tmp/staff_1.out &')
    staff_2.cmd('../bin/ipPacketReceiver > /tmp/staff_2.out &')
    manager.cmd('../bin/ipPacketReceiver > /tmp/manager.out &')
    president.cmd('../bin/ipPacketReceiver > /tmp/president.out &')
    # (Optional packet generation via pkt_gen was removed as dead,
    # commented-out code.)
    CLI(net)
    # Stop simulation.
    net.stop()
from mininet.net import Mininet
from mininet.node import Controller
from mininet.topo import SingleSwitchTopo
from mininet.log import setLogLevel
import os


class POXBridge(Controller):
    """Controller subclass that drives POX's forwarding.l2_learning app."""

    def start(self):
        """Launch the POX learning switch as a background job."""
        home = os.environ['HOME']
        self.pox = '%s/pox/pox.py' % home
        self.cmd(self.pox, 'forwarding.l2_learning &')

    def stop(self):
        """Terminate the background POX job started by start()."""
        self.cmd('kill %' + self.pox)


# Lets `mn --controller poxbridge` resolve this class by name.
controllers = {'poxbridge': POXBridge}

if __name__ == '__main__':
    setLogLevel('info')
    network = Mininet(topo=SingleSwitchTopo(5), controller=POXBridge)
    network.start()
    network.pingAll()
    network.stop()
def _timed_transfer(h1, h2, receiver_cmd, sender_cmd,
                    start_label, done_label, log_name=None):
    """Run one timed h1 -> h2 transfer round.

    Starts receiver_cmd on h2 and sender_cmd on h1 (always start the
    receiver side first!), waits for both to finish, optionally
    archives h2's attack_log.txt as log_name, and prints the elapsed
    wall-clock time.
    """
    print('Starting %s...' % start_label)
    start_time = time.time()
    h2.sendCmd(receiver_cmd)
    h1.sendCmd(sender_cmd)
    h2.waitOutput()
    h1.waitOutput()
    if log_name is not None:
        h2.cmd('mv attack_log.txt %s' % log_name)
    print('%s done! (%.2f sec)' % (done_label, time.time() - start_time))


def main():
    """Create and run the experiment.

    Builds the standard topology, then records a normal TCP transfer
    followed by three receiver-side TCP attacks (ACK division, DupACK
    spoofing, optimistic ACKing), archiving each attack's log file.
    """
    parser = build_parser()
    args = parser.parse_args()
    # Build topology.
    topo = mn.StandardTopo(args.link_delay)
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # Dump network topology, then check full connectivity before measuring.
    dumpNodeConnections(net.hosts)
    drop_rate = net.pingAll()
    if drop_rate > 0:
        print('Reachability test failed!! Please restart. ')
        return
    data_size, num_attack = args.data_size, args.num_attack
    opt_interval, output_dir = args.opt_interval, args.output_dir
    h1 = net.get('h1')
    h2 = net.get('h2')
    # RTT = 4 * link_delay (link_delay is per-hop, one-way, in ms).
    rtt = 4 * args.link_delay
    print('Round-trip delay is %.1f secs.' % (rtt / 1000.))
    # The sender command is identical for every round below.
    sender_cmd = 'python reno.py --role sender --host h1 --rtt %d --limit %d'\
        % (rtt, data_size)
    # Sleep 2s between rounds to let in-flight packets drain.
    time.sleep(2.)
    # Baseline: a normal (unattacked) TCP transfer.
    _timed_transfer(h1, h2,
                    'python reno.py --role receiver --host h2',
                    sender_cmd,
                    'normal TCP connection', 'Normal TCP connection')
    time.sleep(2.)
    # ACK Division attack.
    _timed_transfer(h1, h2,
                    'python attacker.py --host h2 --attack div --num %d'
                    % num_attack,
                    sender_cmd,
                    'ACK Division attack', 'ACK Division attack',
                    log_name='div_attack_log.txt')
    time.sleep(2.)
    # DupACK Spoofing attack.
    _timed_transfer(h1, h2,
                    'python attacker.py --host h2 --attack dup --num %d'
                    % num_attack,
                    sender_cmd,
                    'DupACK Spoofing attack', 'DupACK Spoofing attack',
                    log_name='dup_attack_log.txt')
    time.sleep(2.)
    # Optimistic ACKing attack.
    _timed_transfer(h1, h2,
                    'python attacker.py --host h2 --attack opt --num %d --interval %d'
                    % (num_attack, opt_interval),
                    sender_cmd,
                    'Optimistic ACKing attack', 'Optimistic ACKing attack',
                    log_name='opt_attack_log.txt')
    # Shutdown mininet.
    net.stop()
def mobilityTest():
    """Build a dual-access-point SDN topology with one mobile host (h1)
    and a 'video' server, then migrate h1 between APs while checking
    connectivity with pingAll after each move.

    When DEBUG_FLOWS is set the test is interactive (waits for [RETURN]
    between steps and prints switch connections); otherwise it pauses
    5 seconds between steps.
    """
    # build=False: switches are started individually against the
    # controller further below.
    net = Mininet(topo=None, switch=MobilitySwitch, build=False)
    # Create mobile nodes (VLAN-tagged; VLANHost and AP1_VLAN are
    # defined elsewhere in this file).
    h1 = net.addHost('h1', cls=VLANHost, vlan=AP1_VLAN,
                     mac='00:00:00:00:00:02', ip='192.168.0.2/24')
    # Create 'video' server on the same VLAN/subnet.
    video = net.addHost('video', cls=VLANHost, vlan=AP1_VLAN,
                        mac='00:00:00:00:00:01', ip='192.168.0.1/24')
    # Create AP and SDN switches with fixed, recognizable MACs/DPIDs.
    ap1 = net.addSwitch('ap1', listenPort=6634, mac='00:a1:a1:a1:a1:a1',
                        dpid='a1a1a1a1a1a1a1a1')
    ap2 = net.addSwitch('ap2', listenPort=6634, mac='00:a2:a2:a2:a2:a2',
                        dpid='a2a2a2a2a2a2a2a2')
    sdnA = net.addSwitch('sdnA', listenPort=6634, mac='00:0A:0A:0A:0A:0A',
                         dpid='0A0A0A0A0A0A0A0A')
    sdnB = net.addSwitch('sdnB', listenPort=6634, mac='00:0B:0B:0B:0B:0B',
                         dpid='0B0B0B0B0B0B0B0B')
    router = net.addSwitch('router', listenPort=6634, mac='00:05:05:05:05:05',
                           dpid='0505050505050505')
    print "*** Creating links"
    # h1 attaches to ap1 initially; explicit port numbers keep the
    # AP<->SDN uplinks on port 2 of each switch so moveHost can target
    # port 1 on the destination AP.
    net.addLink(h1, ap1)
    net.addLink(ap1, sdnA, port1=2, port2=2)
    net.addLink(ap2, sdnB, port1=2, port2=2)
    net.addLink(sdnB, sdnA)
    net.addLink(sdnA, router)
    net.addLink(sdnB, router)
    net.addLink(video, router)
    # Add Controllers (remote; CONTROLLER_IP defined elsewhere).
    ctrl = net.addController('c0', controller=RemoteController,
                             ip=CONTROLLER_IP, port=6633)
    net.build()
    # Connect switches to controller and/or perform ovs-ofctl commands
    # to set up hard coded flows.
    ap1.start([ctrl])
    ap2.start([ctrl])
    sdnA.start([ctrl])
    sdnB.start([ctrl])
    router.start([ctrl])
    ################################################################################################
    if DEBUG_FLOWS:
        print '* Initial network (before pingall):'
        printConnections(net.switches)
        _dummy = raw_input("\nInitial network setup. Press [RETURN] for first 'pingall' command")
    # PingAll
    net.pingAll()
    if DEBUG_FLOWS:
        time.sleep(1)
        print '\n* Initial network (after first pingall):'
        printConnections(net.switches)
    ################################################################################################
    # Pause to allow UI verification
    if DEBUG_FLOWS:
        _dummy = raw_input("\nPress [RETURN] to migrate hosts")
    else:
        print 'Sleep 5 seconds before moving both hosts'
        pause(5)
    ################################################################################################
    # First migration: h1 from ap1 to ap2 (port 1 is the host-facing port).
    print 'Moving host(s) to other AP'
    moveHost(h1, ap1, ap2, newPort=1)
    if DEBUG_FLOWS:
        _dummy = raw_input("\nHosts moved. Press [RETURN] for next 'pingall' command")
    else:
        print 'Pinging again in 5 seconds'
        pause(5)
    net.pingAll()
    if DEBUG_FLOWS:
        _dummy = raw_input("\nPress [RETURN] to migrate host(s) back to original AP")
    else:
        print 'Sleep 5 seconds before moving both host(s) back to original AP'
        pause(5)
    # Second migration: h1 back to its home AP, then re-verify.
    print 'Moving host(s) back to their Home-AP'
    moveHost(h1, ap2, ap1, newPort=1)
    if DEBUG_FLOWS:
        _dummy = raw_input("\nHosts moved back to original home access point. Press [RETURN] for next 'pingall' command")
    else:
        print 'Pinging again in 5 seconds'
        pause(5)
    net.pingAll()
    print('Done. Use CLI for additional commands...')
    CLI(net)
    net.stop()
def myNetwork():
    """Build a single-switch, three-host network and demonstrate the Ryu
    rest_firewall application on controller c0.

    Sequence: start the network, switch s1 to OpenFlow 1.3, launch
    ryu-manager with rest_firewall (which blocks all traffic on
    startup), then enable the firewall via its REST API.  Per-host-pair
    allow rules can be added afterwards (see the commented curl
    examples below), after which h1<->h2 pings succeed while h3 stays
    unreachable.
    """
    net = Mininet(topo=None, build=False, ipBase='10.0.0.0/8')
    info('*** Adding controller\n')
    c0 = net.addController(name='c0', controller=Controller,
                           protocol='tcp', port=6633)
    info('*** Add switches\n')
    s1 = net.addSwitch('s1', cls=OVSKernelSwitch)
    info('*** Add hosts\n')
    h1 = net.addHost('h1', cls=Host, ip='10.0.0.1', defaultRoute=None)
    h2 = net.addHost('h2', cls=Host, ip='10.0.0.2', defaultRoute=None)
    h3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None)
    info('*** Add links\n')
    net.addLink(s1, h2)
    net.addLink(s1, h3)
    net.addLink(s1, h1)
    info('*** Starting network\n')
    net.build()
    info('*** Starting controllers\n')
    for controller in net.controllers:
        controller.start()
    info('*** Starting switches\n')
    net.get('s1').start([c0])
    info('*** Post configure switches and hosts\n')
    # rest_firewall requires OpenFlow 1.3 on the bridge.
    print('SETTING THE VERSION OF OPENFLOW TO BE USED IN EACH ROUTER:\n')
    s1.cmdPrint('ovs-vsctl set Bridge s1 protocols=OpenFlow13')
    # Kill any previously-running controller process before starting Ryu.
    print('FOR SAFETY I KILLALL CONTROLLER PREVIOUS:\n')
    c0.cmdPrint('killall controller')
    print('START REST_FIREWALL ON XTERM OF C0 CONTROLLER:\n')
    c0.cmdPrint('ryu-manager ryu.app.rest_firewall &')
    # On startup the firewall blocks all connections; this pingAll is
    # expected to fail completely.
    print('FIREWALL STARTED SET TO CUT OFF ALL COMMUNICATION:\n')
    net.pingAll()
    # Enable the firewall module on switch dpid 0000000000000001 via REST.
    print('ENABLE FIREWALL:\n')
    c0.cmdPrint('curl -X PUT http://localhost:8080/firewall/module/enable/0000000000000001 &')
    c0.cmdPrint('curl http://localhost:8080/firewall/module/status &')
    # To allow pings between h1 and h2, open an xterm for c0 in the
    # mininet dialog (mininet> xterm c0) and add these rules:
    # c0.cmdPrint(curl -X POST -d '{"nw_src": "10.0.0.1/8", "nw_dst": "10.0.0.2/8", "nw_proto": "ICMP"}' http://localhost:8080/firewall/rules/0000000000000001)
    # c0.cmdPrint(curl -X POST -d '{"nw_src": "10.0.0.2/8", "nw_dst": "10.0.0.1/8", "nw_proto": "ICMP"}' http://localhost:8080/firewall/rules/0000000000000001)
    # After adding them, `pingAll` in the mininet CLI shows h1 pinging
    # h2 successfully while h3 remains unreachable.  The rules above
    # cover only ICMP, but rules for other protocols can be added too.
    print('PING h1->h2\n')
    CLI(net)
    net.stop()
class NetworkConfiguration(object): def __init__(self, controller, controller_ip, controller_port, controller_api_base_url, controller_api_user_name, controller_api_password, topo_name, topo_params, conf_root, synthesis_name, synthesis_params, roles, project_name="test", power_simulator_ip="127.0.0.1", link_latency=""): self.controller = controller self.topo_name = topo_name self.topo_params = topo_params self.topo_name = topo_name self.conf_root = conf_root self.synthesis_name = synthesis_name self.synthesis_params = synthesis_params self.roles = roles self.project_name = project_name self.power_simulator_ip = power_simulator_ip self.link_latency = link_latency self.controller_ip = controller_ip self.controller_port = controller_port self.topo = None self.nc_topo_str = None self.init_topo() self.init_synthesis() self.mininet_obj = None self.cm = None self.ng = None # Setup the directory for saving configs, check if one does not exist, # if not, assume that the controller, cyber_network and rule synthesis needs to be triggered. 
self.conf_path = self.conf_root + str(self) + "/" if not os.path.exists(self.conf_path): os.makedirs(self.conf_path) self.load_config = False self.save_config = True else: self.load_config = False self.save_config = True self.h = httplib2.Http() self.controller_api_base_url = controller_api_base_url self.controller_api_base_url = controller_api_base_url self.h.add_credentials(controller_api_user_name, controller_api_password) def __str__(self): return self.controller + "_" + str(self.synthesis) + "_" + str( self.topo) def __del__(self): self.cm.stop_controller() self.cleanup_mininet() def init_topo(self): if self.topo_name == "ring": self.topo = RingTopo(self.topo_params) self.nc_topo_str = "Ring topology with " + str( self.topo.total_switches) + " switches" elif self.topo_name == "clostopo": self.topo = ClosTopo(self.topo_params) self.nc_topo_str = "Clos topology with " + str( self.topo.total_switches) + " switches" elif self.topo_name == "linear": self.topo = LinearTopo(self.topo_params) self.nc_topo_str = "Linear topology with " + str( self.topo_params["num_switches"]) + " switches" elif self.topo_name == "clique": self.topo = CliqueTopo(self.topo_params) self.nc_topo_str = "Linear topology with " + str( self.topo_params["num_switches"]) + " switches" elif self.topo_name == "clique_enterprise": self.topo = CliqueEnterpriseTopo(self.topo_params) self.nc_topo_str = "Clique Enterprise topology with " + str( self.topo_params["num_switches"]) + " switches" else: raise NotImplementedError("Topology: %s" % self.topo_name) def init_synthesis(self): if self.synthesis_name == "DijkstraSynthesis": self.synthesis_params["master_switch"] = self.topo_name == "linear" self.synthesis = DijkstraSynthesis(self.synthesis_params) elif self.synthesis_name == "AboresceneSynthesis": self.synthesis = AboresceneSynthesis(self.synthesis_params) elif self.synthesis_name == "SimpleMACSynthesis": self.synthesis = SimpleMACSynthesis(self.synthesis_params) else: self.synthesis = None def 
prepare_all_flow_specifications(self): flow_specs = [] flow_match = Match(is_wildcard=True) #flow_match["ethernet_type"] = 0x0800 for src_host_id, dst_host_id in permutations(self.ng.host_ids, 2): if src_host_id == dst_host_id: continue fs = FlowSpecification(src_host_id, dst_host_id, flow_match) fs.ng_src_host = self.ng.get_node_object(src_host_id) fs.ng_dst_host = self.ng.get_node_object(dst_host_id) fs.mn_src_host = self.mininet_obj.get(src_host_id) fs.mn_dst_host = self.mininet_obj.get(dst_host_id) flow_specs.append(fs) return flow_specs def trigger_synthesis(self, synthesis_setup_gap): if self.synthesis_name == "DijkstraSynthesis": self.synthesis.network_graph = self.ng self.synthesis.synthesis_lib = SynthesisLib( "localhost", "8181", self.ng) self.synthesis.synthesize_all_node_pairs() elif self.synthesis_name == "AboresceneSynthesis": self.synthesis.network_graph = self.ng self.synthesis.synthesis_lib = SynthesisLib( "localhost", "8181", self.ng) flow_match = Match(is_wildcard=True) flow_match["ethernet_type"] = 0x0800 self.synthesis.synthesize_all_switches(flow_match, 2) elif self.synthesis_name == "SimpleMACSynthesis": self.synthesis.network_graph = self.ng self.synthesis.synthesis_lib = SynthesisLib( "localhost", "8181", self.ng) flow_specs = self.prepare_all_flow_specifications() self.synthesis.synthesize_flow_specifications(flow_specs) if synthesis_setup_gap: time.sleep(synthesis_setup_gap) if self.mininet_obj: #self.mininet_obj.pingAll() # full_data = self.mininet_obj.pingFull(hosts=[self.mininet_obj.get('h1'), # self.mininet_obj.get('h2')]) # print full_data """ h1 = self.mininet_obj.get('h1') h2 = self.mininet_obj.get('h2') s1 = self.mininet_obj.get('s1') cmd = "ping -c3 " + h2.IP() output = h1.cmd(cmd) macAddr = os.popen("ifconfig -a s1-eth1 | grep HWaddr | awk -F \' \' \'{print $5}\'").read().rstrip('\n') #macAddr = str(proc.stdout.read()) os.system("sudo tcprewrite --enet-smac=" + str(macAddr) + " 
--infile=/home/ubuntu/Desktop/Workspace/NetPower_TestBed/test.pcap --outfile=/home/ubuntu/Desktop/Workspace/NetPower_TestBed/test2.pcap") cmd = "sudo tcpreplay -i s1-eth1 /home/ubuntu/Desktop/Workspace/NetPower_TestBed/test2.pcap" os.system(cmd) #output = h1.cmd(cmd) print "here" """ def get_ryu_switches(self): ryu_switches = {} # Get all the ryu_switches from the inventory API remaining_url = 'stats/switches' resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") #CLI(self.mininet_obj) #import pdb; pdb.set_trace() ryu_switch_numbers = json.loads(content) for dpid in ryu_switch_numbers: this_ryu_switch = {} # Get the flows remaining_url = 'stats/flow' + "/" + str(dpid) resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": switch_flows = json.loads(content) switch_flow_tables = defaultdict(list) for flow_rule in switch_flows[str(dpid)]: switch_flow_tables[flow_rule["table_id"]].append(flow_rule) this_ryu_switch["flow_tables"] = switch_flow_tables else: print "Error pulling switch flows from RYU." # Get the ports remaining_url = 'stats/portdesc' + "/" + str(dpid) resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": switch_ports = json.loads(content) this_ryu_switch["ports"] = switch_ports[str(dpid)] else: print "Error pulling switch ports from RYU." # Get the groups remaining_url = 'stats/groupdesc' + "/" + str(dpid) resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": switch_groups = json.loads(content) this_ryu_switch["groups"] = switch_groups[str(dpid)] else: print "Error pulling switch ports from RYU." 
ryu_switches[dpid] = this_ryu_switch with open(self.conf_path + "ryu_switches.json", "w") as outfile: json.dump(ryu_switches, outfile) def get_onos_switches(self): # Get all the onos_switches from the inventory API remaining_url = 'devices' resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") onos_switches = json.loads(content) for this_switch in onos_switches["devices"]: # Get the flows remaining_url = 'flows' + "/" + this_switch["id"] resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": switch_flows = json.loads(content) switch_flow_tables = defaultdict(list) for flow_rule in switch_flows["flows"]: switch_flow_tables[flow_rule["tableId"]].append(flow_rule) this_switch["flow_tables"] = switch_flow_tables else: print "Error pulling switch flows from Onos." # Get the ports remaining_url = "links?device=" + this_switch["id"] resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": switch_links = json.loads(content)["links"] this_switch["ports"] = {} for link in switch_links: if link["src"]["device"] == this_switch["id"]: this_switch["ports"][link["src"]["port"]] = link["src"] elif link["dst"]["device"] == this_switch["id"]: this_switch["ports"][link["dst"]["port"]] = link["dst"] else: print "Error pulling switch ports from RYU." # Get the groups remaining_url = 'groups' + "/" + this_switch["id"] resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") if resp["status"] == "200": this_switch["groups"] = json.loads(content)["groups"] else: print "Error pulling switch ports from RYU." 
with open(self.conf_path + "onos_switches.json", "w") as outfile: json.dump(onos_switches, outfile) def get_mininet_host_nodes(self): mininet_host_nodes = {} for sw in self.topo.switches(): mininet_host_nodes[sw] = [] for h in self.get_all_switch_hosts(sw): mininet_host_dict = { "host_switch_id": "s" + sw[1:], "host_name": h.name, "host_IP": h.IP(), "host_MAC": h.MAC() } mininet_host_nodes[sw].append(mininet_host_dict) with open(self.conf_path + "mininet_host_nodes.json", "w") as outfile: json.dump(mininet_host_nodes, outfile) return mininet_host_nodes def get_onos_host_nodes(self): # Get all the onos_hosts from the inventory API remaining_url = 'hosts' resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") onos_hosts = json.loads(content)["hosts"] with open(self.conf_path + "onos_hosts.json", "w") as outfile: json.dump(onos_hosts, outfile) return onos_hosts def get_host_nodes(self): if self.controller == "ryu": self.get_mininet_host_nodes() elif self.controller == "onos": self.get_onos_host_nodes() else: raise NotImplemented def get_mininet_links(self): mininet_port_links = {} with open(self.conf_path + "mininet_port_links.json", "w") as outfile: json.dump(self.topo.ports, outfile) return mininet_port_links def get_onos_links(self): # Get all the onos_links from the inventory API remaining_url = 'links' resp, content = self.h.request( self.controller_api_base_url + remaining_url, "GET") onos_links = json.loads(content)["links"] with open(self.conf_path + "onos_links.json", "w") as outfile: json.dump(onos_links, outfile) return onos_links def get_links(self): if self.controller == "ryu": self.get_mininet_links() elif self.controller == "onos": self.get_onos_links() else: raise NotImplementedError def get_switches(self): # Now the output of synthesis is carted away if self.controller == "ryu": self.get_ryu_switches() elif self.controller == "onos": self.get_onos_switches() else: raise NotImplementedError def setup_network_graph(self, 
mininet_setup_gap=None, synthesis_setup_gap=None): if not self.load_config and self.save_config: if self.controller == "ryu": self.cm = ControllerMan(controller=self.controller) self.cm.start_controller() #time.sleep(mininet_setup_gap) self.start_mininet() if mininet_setup_gap: time.sleep(mininet_setup_gap) # These things are needed by network graph... self.get_switches() self.get_host_nodes() self.get_links() self.ng = NetworkGraph(network_configuration=self) self.ng.parse_network_graph() if self.synthesis_name: # Now the synthesis... self.trigger_synthesis(synthesis_setup_gap) # Refresh just the switches in the network graph, post synthesis self.get_switches() self.ng.parse_network_graph() #self.ng.parse_switches() else: self.ng = NetworkGraph(network_configuration=self) self.ng.parse_network_graph() print "total_flow_rules:", self.ng.total_flow_rules return self.ng def start_mininet(self): self.cleanup_mininet() if self.controller == "ryu": self.mininet_obj = Mininet( topo=self.topo, cleanup=True, autoStaticArp=True, link=TCLink, controller=lambda name: RemoteController( name, ip=self.controller_ip, port=self.controller_port), switch=partial(OVSSwitch, protocols='OpenFlow13')) #self.set_switch_netdevice_owners() self.mininet_obj.start() def cleanup_mininet(self): if self.mininet_obj: print "Mininet cleanup..." 
#self.mininet_obj.stop() os.system("sudo mn -c") def get_all_switch_hosts(self, switch_id): p = self.topo.ports for node in p: # Only look for this switch's hosts if node != switch_id: continue for switch_port in p[node]: dst_list = p[node][switch_port] dst_node = dst_list[0] if dst_node.startswith("h"): yield self.mininet_obj.get(dst_node) def get_mininet_hosts_obj(self): for sw in self.topo.switches(): for h in self.get_all_switch_hosts(sw): yield h def is_host_pair_pingable(self, src_host, dst_host): hosts = [src_host, dst_host] ping_loss_rate = self.mininet_obj.ping(hosts, '1') # If some packets get through, then declare pingable if ping_loss_rate < 100.0: return True else: # If not, do a double check: cmd_output = src_host.cmd("ping -c 3 " + dst_host.IP()) print cmd_output if cmd_output.find("0 received") != -1: return False else: return True def are_all_hosts_pingable(self): ping_loss_rate = self.mininet_obj.pingAll('1') # If some packets get through, then declare pingable if ping_loss_rate < 100.0: return True else: return False def get_intf_status(self, ifname): # set some symbolic constants SIOCGIFFLAGS = 0x8913 null256 = '\0' * 256 # create a socket so we have a handle to query s = socket(AF_INET, SOCK_DGRAM) # call ioctl() to get the flags for the given interface result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, ifname + null256) # extract the interface's flags from the return value flags, = struct.unpack('H', result[16:18]) # check "UP" bit and print a message up = flags & 1 return ('down', 'up')[up] def wait_until_link_status(self, sw_i, sw_j, intended_status): num_seconds = 0 for link in self.mininet_obj.links: if (sw_i in link.intf1.name and sw_j in link.intf2.name) or ( sw_i in link.intf2.name and sw_j in link.intf1.name): while True: status_i = self.get_intf_status(link.intf1.name) status_j = self.get_intf_status(link.intf2.name) if status_i == intended_status and status_j == intended_status: break time.sleep(1) num_seconds += 1 return num_seconds def 
is_bi_connected_manual_ping_test(self, experiment_host_pairs_to_check, edges_to_try=None): is_bi_connected = True if not edges_to_try: edges_to_try = self.topo.g.edges() for edge in edges_to_try: # Only try and break switch-switch edges if edge[0].startswith("h") or edge[1].startswith("h"): continue for (src_host, dst_host) in experiment_host_pairs_to_check: is_pingable_before_failure = self.is_host_pair_pingable( src_host, dst_host) if not is_pingable_before_failure: print "src_host:", src_host, "dst_host:", dst_host, "are not connected." is_bi_connected = False break self.mininet_obj.configLinkStatus(edge[0], edge[1], 'down') self.wait_until_link_status(edge[0], edge[1], 'down') time.sleep(5) is_pingable_after_failure = self.is_host_pair_pingable( src_host, dst_host) self.mininet_obj.configLinkStatus(edge[0], edge[1], 'up') self.wait_until_link_status(edge[0], edge[1], 'up') time.sleep(5) is_pingable_after_restoration = self.is_host_pair_pingable( src_host, dst_host) if not is_pingable_after_failure == True: is_bi_connected = False print "Got a problem with edge:", edge, " for src_host:", src_host, "dst_host:", dst_host break return is_bi_connected def is_bi_connected_manual_ping_test_all_hosts(self, edges_to_try=None): is_bi_connected = True if not edges_to_try: edges_to_try = self.topo.g.edges() for edge in edges_to_try: # Only try and break switch-switch edges if edge[0].startswith("h") or edge[1].startswith("h"): continue is_pingable_before_failure = self.are_all_hosts_pingable() if not is_pingable_before_failure: is_bi_connected = False break self.mininet_obj.configLinkStatus(edge[0], edge[1], 'down') self.wait_until_link_status(edge[0], edge[1], 'down') time.sleep(5) is_pingable_after_failure = self.are_all_hosts_pingable() self.mininet_obj.configLinkStatus(edge[0], edge[1], 'up') self.wait_until_link_status(edge[0], edge[1], 'up') time.sleep(5) is_pingable_after_restoration = self.are_all_hosts_pingable() if not is_pingable_after_failure == True: 
is_bi_connected = False break return is_bi_connected def parse_iperf_output(self, iperf_output_string): data_lines = iperf_output_string.split('\r\n') interesting_line_index = None for i in xrange(len(data_lines)): if data_lines[i].endswith('Server Report:'): interesting_line_index = i + 1 data_tokens = data_lines[interesting_line_index].split() print "Transferred Rate:", data_tokens[7] print "Jitter:", data_tokens[9] def parse_ping_output(self, ping_output_string): data_lines = ping_output_string.split('\r\n') interesting_line_index = None for i in xrange(len(data_lines)): if data_lines[i].startswith('5 packets transmitted'): interesting_line_index = i + 1 data_tokens = data_lines[interesting_line_index].split() data_tokens = data_tokens[3].split('/') print 'Min Delay:', data_tokens[0] print 'Avg Delay:', data_tokens[1] print 'Max Delay:', data_tokens[2] def set_netdevice_owner_in_timekeeper(self, intfNames, pid): for name in intfNames: if name != "lo": print "Setting net-device owner for ", name set_netdevice_owner(pid, name) def set_switch_netdevice_owners(self): import pdb pdb.set_trace() for i in xrange(0, len(self.mininet_obj.switches)): mininet_switch = self.mininet_obj.switches[i] # set netdevices owner self.set_netdevice_owner_in_timekeeper(mininet_switch.intfNames(), mininet_switch.pid)
class Experimento:
    """Drives a single Mininet experiment: build the network from an
    experimental unit (UE), run ping / iperf measurements, and tear down."""

    def __init__(self):
        self.net = None     # Mininet instance, created by configureParams()
        self.inputs = None  # the experimental unit ("unidad experimental")

    def configureParams(self, ue):
        """Build (but do not start) the Mininet network described by *ue*."""
        self.inputs = ue
        self.net = Mininet(controller=ue.getController(), switch=OVSSwitch,
                           build=False, link=TCLink, topo=ue.getTopo())
        self.net.build()

    def getUnidadExperimental(self):
        """Return the experimental unit this experiment was configured with."""
        return self.inputs

    def killTopo(self):
        """Best-effort cleanup of any stale Mininet state."""
        subprocess.call(["mn", "-c"])

    def killController(self):
        """Kill any external ryu-manager processes (ryu is the only
        controller that runs outside Mininet)."""
        if self.inputs.getController() == 'ryu':
            for proc in psutil.process_iter(attrs=['pid', 'name']):
                if "ryu-manager" in proc.info['name']:
                    os.kill(proc.info['pid'], 9)

    def startTest(self):
        self.net.start()

    def endTest(self):
        self.net.stop()

    def startCLI(self):
        CLI(self.net)

    def pingAllTest(self):
        self.net.pingAll()

    def _resolve_hosts(self, src_in, dst_in):
        """Return (src, dst) host objects: the explicitly named hosts when
        given, otherwise the experiment's key nodes (fix: this lookup was
        duplicated across pingMeasure/iperfMeasure branches)."""
        if src_in is None and dst_in is None:
            nodosClaves = self.inputs.obtenerNodosClaves()
            src = self.net.get(nodosClaves[1])
            dst = self.net.get(nodosClaves[2])
        else:
            src = self.net.get(src_in)
            dst = self.net.get(dst_in)
        return src, dst

    def pingMeasure(self, src_in=None, dst_in=None, veces=4, intervalo=1,
                    filename=None):
        """Ping dst from src `veces` times every `intervalo` seconds; print
        to the console, or log to *filename* when given."""
        src, dst = self._resolve_hosts(src_in, dst_in)
        if filename is None:
            src.cmdPrint('ping -c', veces, '-i', intervalo, str(dst.IP()))
        else:
            info("Starting Pings: %s ---> %s\n" % (str(src.IP()),
                                                   str(dst.IP())))
            # fix: log file is now closed even if the ping fails mid-way
            with open(filename, 'w') as logfile:
                p = src.popen(['ping', str(dst.IP()), '-i', str(intervalo),
                               '-c', str(veces)], stdout=PIPE)
                for line in p.stdout:
                    logfile.write(line)
                p.wait()
            info("End pings ***\n")

    def iperfTest(self, src_in=None, dst_in=None, veces=4, filename=None):
        """Run Mininet's built-in iperf between src and dst (or the default
        pair).  NOTE(review): a non-None *filename* is silently ignored —
        original behavior kept."""
        if filename is None:
            if src_in is None and dst_in is None:
                self.net.iperf()
            else:
                self.net.iperf([self.net.get(src_in), self.net.get(dst_in)])

    def iperfMeasure(self, src_in=None, dst_in=None, intervalo=1, tiempo=10,
                     filename='salida.log'):
        """Run an iperf client src->dst for `tiempo` seconds, logging the
        client's per-`intervalo` reports to *filename*."""
        src, dst = self._resolve_hosts(src_in, dst_in)
        info("Starting Iperf: %s ---> %s\n" % (str(src.IP()), str(dst.IP())))
        server = dst.popen(['iperf', '-s'])  # server side
        # fix: '-t' and its value are separate argv tokens (was '-t 10')
        client = src.popen(['iperf', '-c', str(dst.IP()),
                            '-i', str(intervalo), '-t', str(tiempo)],
                           stdout=PIPE)
        with open(filename, 'w') as logfile:
            for line in client.stdout:
                logfile.write(line)
            client.wait()
        server.terminate()  # fix: the iperf server process was leaked
        info("*** End iperf measure ***\n")
def mainTopo(): os.system('mn -c') net = Mininet(link=TCLink, host=CPULimitedHost) Cl1 = net.addHost('Cl1', ip='192.168.1.2/29') Se2 = net.addHost('Se2', ip='192.168.2.2/29') Ro1 = net.addHost('Router1') net.addLink(Cl1, Ro1, bw=100) net.addLink(Se2, Ro1, bw=100, max_queue_size=40) net.build() Ro1.cmd('ifconfig Router1-eth0 0') Ro1.cmd('ifconfig Router1-eth1 0') Ro1.cmd('ifconfig Router1-eth0') Ro1.cmd('ifconfig Router1-eth1') Ro1.cmd('ifconfig Router1-eth0 hw ether mac=00:00:00:00:02:01') Ro1.cmd('ifconfig Router1-eth1 hw ether mac=00:00:00:00:02:02') Ro1.cmd("ip addr add 192.168.1.1/29 brd + dev Router1-eth0") Ro1.cmd("ip addr add 192.168.2.1/29 brd + dev Router1-eth1") Ro1.cmd('sysctl -w net.ipv4.ip_forward=1') Cl1.cmd('ip route add default via 192.168.1.1') Se2.cmd('ip route add default via 192.168.2.1') print( '====================STARTING SCENARIO 1 (Various Queue)==========================' ) print( '=================================================================================' ) Cl1.cmdPrint('sysctl net.ipv4.tcp_congestion_control') Se2.cmdPrint('sysctl net.ipv4.tcp_congestion_control') print( '=================================================================================' ) net.pingAll() print( '=================================================================================' ) Se2.cmd('iperf -s &') # Se2.cmd('iperf -s > dataResult/iperf-server.txt &') Se2.cmdPrint('echo Server Iperf Started') print( '===============================================================================' ) Se2.cmdPrint('python -m SimpleHTTPServer &') Se2.cmdPrint('echo Python HTTP Server Start') print( '===============================================================================' ) Cl1.cmdPrint('wireshark &') Cl1.cmdPrint( 'echo Wireshark Started, Manual Override') CLI(net) net.stop()
class Internet2Test(TestCase):
    """Experiment on the Internet2 topology: generate traffic between every
    host pair, then inject malicious flow rules on the 'kans' and 'losa'
    switches that misdirect traffic for 10.0.1.1 / 10.0.8.1.

    Lifecycle hooks (post_start / test / clean / post_test) are driven by the
    TestCase base class defined elsewhere in this project.
    """

    def __init__(self):
        super(Internet2Test, self).__init__()
        self.topo = Internet2Topo()
        # Remote controller assumed to be listening on localhost:6653.
        self.net = Mininet(self.topo,
                           controller=lambda name: RemoteController(
                               name, ip='127.0.0.1', port=6653),
                           switch=OVSSwitch)
        self.popens = []  # subprocess.Popen handles terminated in clean()

    def post_start(self):
        # Warm-up: lets hosts ARP-resolve and the controller learn the paths.
        self.net.pingAll()

    def inject_malicious_rules(self):
        """Install high-priority misdirecting rules on kans/losa, using the
        command style that matches the switch type in use."""
        if str(self.net.switch).find('OVSSwitch') != -1:
            rules = (
                'ovs-ofctl add-flow kans ip,nw_dst=10.0.1.1,priority=255,actions=output:3',
                'ovs-ofctl add-flow losa ip,nw_dst=10.0.8.1,priority=255,actions=output:3'
            )
        elif str(self.net.switch).find('UserSwitch') != -1:
            rules = (
                'dpctl unix:/tmp/kans flow-mod cmd=add,table=0,hard=300,prio=255 eth_type=0x800,ip_dst=10.0.1.1 apply:output=3',
                'dpctl unix:/tmp/losa flow-mod cmd=add,table=0,hard=300,prio=255 eth_type=0x800,ip_dst=10.0.8.1 apply:output=3'
            )
        else:
            raise ValueError(
                'switch unrecognized! cannot inject malicious rules')
        # Rules are installed from the root namespace, not via host.cmd().
        for rule in rules:
            cmds = shlex.split(rule)
            subprocess.Popen(cmds)
        logging.info('malicious rule injected finished, current: {}'.format(
            datetime.datetime.now().strftime('%H:%M:%S.%f')))
        # Look up the dpids of the two attacked switches for the log.
        kans = None
        losa = None
        for sw in self.net.switches:
            if sw.name == 'kans':
                kans = sw.dpid
            elif sw.name == 'losa':
                losa = sw.dpid
        logging.info(
            'inject malicious rule on kans({}), forwarding 10.0.1.1 erroneously....'
            .format(kans))
        logging.info(
            'inject malicious rule on losa({}), forwarding 10.0.8.1 erroneously....'
            .format(losa))

    def test(self):
        """Capture traffic on every host, start all-pairs packet generation
        with ./sendpkt, and inject the malicious rules after 20 seconds."""
        sport = 2000
        dport_start = 3000
        dport_end = 5000
        count = 100000      # packets per source host
        delay = 50000       # inter-packet gap in microseconds
        dports = '{}-{}'.format(dport_start, dport_end)
        # for sw in self.net.switches:
        #     cmds = shlex.split('tcpdump -i {}-eth2 -w tcpdump-{}.pcap net 10.0.0.0/16 and ip'.format(sw.name, sw.name+'eth2'))
        #     popen = subprocess.Popen(cmds)
        #     self.popens.append(popen)
        for h in self.net.hosts:
            h.cmd('tcpdump -w tcpdump-{}.pcap net 10.0.0.0/8 and ip &'.format(
                h.name))
        for src in self.net.hosts:
            # logging.info('ip of host {} is {}'.format(src, src.IP()))
            dsts = ''
            for dst in self.net.hosts:
                if src != dst:
                    dsts += (' ' + dst.IP())
            # sendpkt is an external traffic generator expected in cwd —
            # TODO confirm it is built/deployed before the test runs.
            src.cmd(
                './sendpkt -i {}-eth0 -g {} -p {} -w {}us -c {} {} &'.format(
                    src.name, sport, dports, delay, count, dsts))
        logging.info(
            'traffic injected for every pair of hosts, last for {} seconds...'.
            format(count * delay / 1000 / 1000))
        logging.info('inject malicious flow rules after 20 seconds...')
        time.sleep(20)
        self.inject_malicious_rules()
        # CLI(self.net)

    def sleep_time_after_test_finish(self):
        # Let the generated traffic run its course before teardown.
        return 360

    def clean(self, exception=False):
        super(Internet2Test, self).clean(exception)
        # Kill background jobs started via host.cmd (shell job specs).
        for host in self.net.hosts:
            host.cmd('kill %./sendpkt')
        time.sleep(3)
        for h in self.net.hosts:
            h.cmd('kill %tcpdump')
        for popen in self.popens:
            popen.terminate()

    def post_test(self, exception=False):
        super(Internet2Test, self).post_test(exception)
        # Collect the pcap captures: discard them on failure, otherwise move
        # them into the test's output directory.
        pcaps = [
            f for f in os.listdir('.')
            if os.path.isfile(f) and f.endswith('.pcap')
        ]
        for pcap in pcaps:
            if exception == True:
                try:
                    os.remove(pcap)
                    logging.info('pcap file have been deleted!')
                except OSError:
                    logging.warning('Cannot delete pcap files')
            else:
                try:
                    shutil.move(pcap, self.get_output_dir())
                    logging.info(
                        'pcap file {} have been moved to output directory'.
                        format(pcap))
                except IOError:
                    logging.warning(
                        'cannot move pcap file {} to output directory.'.format(
                            pcap))
from mininet.net import Mininet from mininet.node import OVSKernelSwitch from mininet.topolib import TreeTopo def ifconfigTest(net): "Run ifconfig on all hosts in net." hosts = net.hosts for host in hosts: info(host.cmd('ifconfig')) if __name__ == '__main__': lg.setLogLevel('info') info("*** Initializing Mininet and kernel modules\n") OVSKernelSwitch.setup() info("*** Creating network\n") network = Mininet(TreeTopo(depth=2, fanout=2), switch=OVSKernelSwitch, waitConnected=True) info("*** Starting network\n") network.start() info("*** Running ping test\n") network.pingAll() info("*** Running ifconfig test\n") ifconfigTest(network) info("*** Starting CLI (type 'exit' to exit)\n") CLI(network) info("*** Stopping network\n") network.stop()
def Mptcp(): net = Mininet(cleanup=True) #add 5 hosts with mininet default IP 10.0.0.1 to 10.0.0.5 to the network topology h1 = net.addHost('h1') h2 = net.addHost('h2') h3 = net.addHost('h3') h4 = net.addHost('h4') h5 = net.addHost('h5') #add 1 switch to the topology s3 = net.addSwitch('s3') c0 = net.addController('c0') #connect 5 hosts through a switch net.addLink(h1, s3, cls=TCLink, bw=1000) net.addLink(h2, s3, cls=TCLink, bw=1000) net.addLink(h3, s3, cls=TCLink, bw=1000) net.addLink(h4, s3, cls=TCLink, bw=1000) net.addLink(h5, s3, cls=TCLink, bw=1000) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() print "Testing bandwidth between h1 and h4" #CLI(net) time.sleep(1) # wait for net to startup print "\n", " " * 5, "#" * 40, "\n", " " * 10, "STARTING\n" #disable mptcp on hosts and it inturns tcp if tcp_on: set_mptcp_enabled(False) h2_out = h1.cmdPrint('ifconfig') #test connectivity of h1(client) to the h2(server) from both the interfaces. print "ping test output: %s\n" % h2_out h2_out = h1.cmdPrint('ping -c 1 ' + h2.IP() + ' ') print "ping test output: %s\n" % h2_out #give time to print ping output time.sleep(3) print 'starting iperf server at: ', h2.IP() h2.cmd('iperf -s' + str(test_duration) + '> iperf_mptcp_server_log.txt & ') time.sleep(test_duration / 5.0) print 'starting iperf client at', h1.IP, ', connect to ', h2.IP() h1.cmd('iperf -n 2G -c ' + h2.IP() + ' >> iperf_tcp_client_log.txt &') #give time to print iperf output time.sleep(test_duration / 1.2) print "\niperf client response:" print h1.cmd('cat iperf_tcp_client_log.txt') h2.cmd('kill -9 %iperf') time.sleep(test_duration / 3.0) #use CLI to check any commands or outputs CLI(net) net.stop() os.system("sudo mn -c")
def mobilityTest(): net = Mininet(controller=RemoteController, switch=MultiSwitch, build=False) #Adding controllers for c in [c0, c1, c2, c3, c4]: net.addController(c) #Adding switches s1 = net.addSwitch('s1') s2 = net.addSwitch('s2') s3 = net.addSwitch('s3') s4 = net.addSwitch('s4') s5 = net.addSwitch('s5') s6 = net.addSwitch('s6') s7 = net.addSwitch('s7') s8 = net.addSwitch('s8') s9 = net.addSwitch('s9') s10 = net.addSwitch('s10') s11 = net.addSwitch('s11') s12 = net.addSwitch('s12') s13 = net.addSwitch('s13') s14 = net.addSwitch('s14') s15 = net.addSwitch('s15') s16 = net.addSwitch('s16') s17 = net.addSwitch('s17') #Adding hosts h1 = net.addHost('h1') h2 = net.addHost('h2') h3 = net.addHost('h3') h4 = net.addHost('h4') h5 = net.addHost('h5') h6 = net.addHost('h6') h7 = net.addHost('h7') h8 = net.addHost('h8') h9 = net.addHost('h9') h10 = net.addHost('h10') h11 = net.addHost('h11') h12 = net.addHost('h12') h13 = net.addHost('h13') h14 = net.addHost('h14') h15 = net.addHost('h15') h16 = net.addHost('h16') h17 = net.addHost('h17') #adding link between switches and hosts net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s6, h6) net.addLink(s7, h7) net.addLink(s8, h8) net.addLink(s9, h9) net.addLink(s10, h10) net.addLink(s11, h11) net.addLink(s12, h12) net.addLink(s13, h13) net.addLink(s14, h14) net.addLink(s15, h15) net.addLink(s16, h16) net.addLink(s17, h17) #Adding links between switches net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) net.addLink(s5, s6) net.addLink(s6, s7) net.addLink(s7, s8) net.addLink(s8, s9) net.addLink(s9, s10) net.addLink(s10, s11) net.addLink(s11, s12) net.addLink(s12, s13) net.addLink(s13, s14) net.addLink(s14, s15) net.addLink(s15, s16) net.addLink(s16, s17) net.build() net.start() net.pingAll() #start communication between two hosts h1.cmd('iperf -s & > s.txt') h16.cmd('iperf -c h1 > c.txt ') #Code for moving host to another domain 
printConnections(net.switches) h1, old = net.get('h1', 's1') # If you want to increase the mobility frequency, add for loop # and call the functions for moving the hosts between multiple domains new = net['s7'] port = randint(10, 20) #port = 4 print '* Moving', h1, 'from', old, 'to', new, 'port', port hintf, sintf = moveHost(h1, old, new, newPort=port) print '*', hintf, 'is now connected to', sintf print '* Clearing out old flows' for sw in net.switches: sw.dpctl('del-flows') t2 = time.clock() print 'end time:', t2 time1 = t2 - t1 print 'time taken:', time1 print '* New network:' printConnections(net.switches) print '* Testing connectivity:' h1.cmd('kill %while') CLI(net) net.stop()
def quic_exchange():
    """Run QUIC partial-vs-reliable transfer experiments over a dumbbell.

    Starts a dumbbell network, disables generic segmentation offload on
    the application hosts, then runs two ngtcp2 client/server transfers
    at 1% loss (-t 0.01): one with the stock ("partial") stack and one
    with the reliable variant, logging each run under
    ngtcp2/datasets/50ms/.

    The original carried the other loss-rate variants (0%, 0.01%, 0.1%,
    3%) and earlier quiche/iperf attempts as commented-out / string-
    disabled dead code; those were removed. To run another variant,
    adjust the server's -t option and the output file names.
    """
    dumbbell = DumbbellTopo()
    network = Mininet(topo=dumbbell, host=CPULimitedHost, link=TCLink,
                      autoPinCpus=True)
    network.start()
    dumpNodeConnections(network.hosts)
    network.pingAll()

    appClient = network.get('aClient')
    appServer = network.get('aServer')

    # Disable offloading - when enabled, it permits segments larger than
    # 1500 bytes.
    appClient.cmd('ethtool -K ' + str(appClient.intf()) + ' gso off')
    appServer.cmd('ethtool -K ' + str(appServer.intf()) + ' gso off')

    # 1% loss, partial reliability
    appServer.cmd(
        './ngtcp2/examples/server -b 3 -t 0.01 -l 18000 -i 3000 -q -f 60 ',
        appServer.IP(),
        ' 5004 ngtcp2/test-ca/rsa/ca.key ngtcp2/test-ca/rsa/ca.cert > ngtcp2/datasets/50ms/PARTIAL_3B_1L_18000F_2000D_60R_server.txt &'
    )
    appClient.cmd(
        './ngtcp2/examples/client -b 3 -e -a 3000 -q ',
        appServer.IP(),
        ' 5004 > ngtcp2/datasets/50ms/PARTIAL_3B_1L_18000F_2000D_60R_client.txt'
    )
    time.sleep(20)

    # 1% loss, fully reliable
    appServer.cmd(
        './ngtcp2-reliable/examples/server -b 3 -t 0.01 -l 18000 -i 3000 -q -f 60 ',
        appServer.IP(),
        ' 5004 ngtcp2/test-ca/rsa/ca.key ngtcp2/test-ca/rsa/ca.cert > ngtcp2/datasets/50ms/RELIABLE_3B_1L_18000F_2000D_60R_server.txt &'
    )
    appClient.cmd(
        './ngtcp2-reliable/examples/client -b 3 -e -a 3000 -q ',
        appServer.IP(),
        ' 5004 > ngtcp2/datasets/50ms/RELIABLE_3B_1L_18000F_2000D_60R_client.txt'
    )
    time.sleep(20)

    network.stop()
    time.sleep(5)
def bufferbloat():
    """Run the bufferbloat experiment: queue monitoring, iperf/ping load,
    and repeated timed webpage fetches from h1's webserver to h2.

    Reads module-level `args` (http3, dir, cong, time) and uses the
    module's BBTopo, start_qmon, start_iperf, start_ping,
    start_webserver and avg helpers.
    """
    if args.http3:
        print("http3")
    else:
        print("tcp")
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    # Select the congestion-control algorithm for the whole experiment.
    os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)
    topo = BBTopo()
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()

    # Monitor the queue on the switch s0's bottleneck interface.
    # NOTE(review): whether the bottleneck is s0-eth1 or s0-eth2 depends
    # on the order links were added in BBTopo -- confirm against it.
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % (args.dir))

    # Start the background load and the webserver on h1.
    start_iperf(net)
    start_ping(net)
    start_webserver(net)

    # Measure the time to fetch the webpage from h1 (running on h1's
    # webserver, fetched by h2) 3 times per round until args.time elapses.
    client = net.get('h2')
    server = net.get('h1')
    start_time = time()
    measurement = []
    while True:
        # do the measurement (say) 3 times per round.
        for _ in range(0, 3):
            # Sanity check: a HEAD request must report a 200 status before
            # we trust the timing fetch. (The original appended a stray
            # "^HTTP" token -- curl treated it as a bogus extra URL,
            # silenced by -s; removed.)
            valid = client.popen(
                "curl -ILs %s/http/index.html" % (server.IP()),
                shell=True).communicate()[0]
            # 200 ---> request success
            if "200".encode(encoding="utf-8") in valid:
                print("pass sanity")
                response_t = client.popen(
                    "curl -o /dev/null -s -w %%{time_total} %s/http/index.html"
                    % server.IP(),
                    shell=True).communicate()[0]
                print("response_time: " + response_t.decode("utf-8"))
                # communicate() returns bytes; float(bytes) is a TypeError
                # in Python 3, so decode first (original bug).
                measurement.append(float(response_t.decode("utf-8")))
            else:
                print("fail sanity")
        sleep(5)
        now = time()
        delta = now - start_time
        if delta > args.time:
            break
        print("%.1fs left..." % (args.time - delta))

    # Report mean and standard deviation of the fetch times.
    # stdev() raises StatisticsError on fewer than 2 samples, so guard
    # against a run where (almost) every sanity check failed.
    if len(measurement) >= 2:
        sd = stdev(measurement)
        avg_res = avg(measurement)
        print("Mean: {}, Stddev: {}".format(avg_res, sd))
    else:
        print("Not enough successful fetches to compute statistics")

    # Hint: The command below invokes a CLI which you can use to
    # debug. It allows you to run arbitrary commands inside your
    # emulated hosts h1 and h2.
    # CLI(net)

    qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def test_ping():
    """Bring up the test topology, verify all-pairs reachability, tear down."""
    network = Mininet(topo=TopoTest(), link=TCLink)
    network.start()
    network.pingAll()
    network.stop()