def run():
    """Build MyTopo, assign static IPv6 addresses to the four switches,
    and drop into the Mininet CLI.

    Bug fixed: the last address was added to 's4-eth3' a second time,
    leaving 's4-eth4' unaddressed; it now goes to 's4-eth4'.
    """
    topo = MyTopo()
    net = Mininet(topo=topo, controller=RemoteController)
    s1, s2, s3, s4 = net.get('s1', 's2', 's3', 's4')
    # Sanity check that all four hosts exist (values themselves unused).
    h1, h2, h3, h4 = net.get('h1', 'h2', 'h3', 'h4')
    # (switch, [(interface, IPv6 address/prefix), ...]) in deterministic order.
    # NOTE(review): s4-eth1 uses 2003::1/64, the same subnet/address as
    # s3-eth1 — possibly intended to be 2004::1/64; confirm with the topology.
    # NOTE(review): s3-eth3/s3-eth4 use 2003:23/2003:34 prefixes while the
    # peers on s2 use 2000:23 — verify the prefixes match link by link.
    v6_plan = [
        (s1, [('s1-eth1', '2001::1/64'),
              ('s1-eth2', '2000:12::1/64'),
              ('s1-eth3', '2000:13::1/64'),
              ('s1-eth4', '2000:14::1/64')]),
        (s2, [('s2-eth1', '2002::1/64'),
              ('s2-eth2', '2000:12::2/64'),
              ('s2-eth3', '2000:23::2/64'),
              ('s2-eth4', '2000:24::2/64')]),
        (s3, [('s3-eth1', '2003::1/64'),
              ('s3-eth2', '2000:13::3/64'),
              ('s3-eth3', '2003:23::3/64'),
              ('s3-eth4', '2003:34::3/64')]),
        (s4, [('s4-eth1', '2003::1/64'),
              ('s4-eth2', '2000:14::4/64'),
              ('s4-eth3', '2003:24::4/64'),
              ('s4-eth4', '2003:34::4/64')]),  # was s4-eth3 (duplicate)
    ]
    for switch, assignments in v6_plan:
        for intf, addr in assignments:
            switch.cmd('ifconfig %s inet6 add %s' % (intf, addr))
    net.start()
    CLI(net)
    net.stop()
def intfOptions():
    "run various traffic control commands on a single interface"
    net = Mininet( autoStaticArp=True )
    net.addController( 'c0' )
    host_a = net.addHost( 'h1' )
    host_b = net.addHost( 'h2' )
    switch = net.addSwitch( 's1' )
    # Only this link is a TCLink, so its intf1 can be reshaped below.
    shaped = net.addLink( host_a, switch, cls=TCLink )
    net.addLink( host_b, switch )
    net.start()
    # flush out latency from reactive forwarding delay
    net.pingAll()
    info( '\n*** Configuring one intf with bandwidth of 5 Mb\n' )
    shaped.intf1.config( bw=5 )
    info( '\n*** Running iperf to test\n' )
    net.iperf()
    info( '\n*** Configuring one intf with loss of 50%\n' )
    shaped.intf1.config( loss=50 )
    info( '\n' )
    net.iperf( ( host_a, host_b ), l4Type='UDP' )
    info( '\n*** Configuring one intf with delay of 15ms\n' )
    shaped.intf1.config( delay='15ms' )
    info( '\n*** Run a ping to confirm delay\n' )
    net.pingPairFull()
    info( '\n*** Done testing\n' )
    net.stop()
def cs461net():
    """Create a simple network for cs461.

    Fix: the docstring previously appeared *after* the first statement
    (stophttp()), making it a no-op string instead of documentation.
    """
    stophttp()
    r = get_ip_setting()
    if r == -1:
        # Abort: per-host IP settings are mandatory for this exercise.
        exit("Couldn't load config file for ip addresses, check whether %s exists" % IPCONFIG_FILE)
    else:
        info( '*** Successfully loaded ip settings for hosts\n %s\n' % IP_SETTING)
    topo = CS461Topo()
    info( '*** Creating network\n' )
    net = Mininet( topo=topo, controller=RemoteController, ipBase=IPBASE )
    net.start()
    server1, server2, client, router = net.get( 'server1', 'server2', 'client', 'sw0')
    # Override the auto-assigned addresses with the configured ones (/8).
    s1intf = server1.defaultIntf()
    s1intf.setIP('%s/8' % IP_SETTING['server1'])
    s2intf = server2.defaultIntf()
    s2intf.setIP('%s/8' % IP_SETTING['server2'])
    clintf = client.defaultIntf()
    clintf.setIP('%s/8' % IP_SETTING['client'])
    for host in server1, server2, client:
        set_default_route(host)
    starthttp( server1 )
    starthttp( server2 )
    CLI( net )
    stophttp()
    net.stop()
def run():
    """Reset state, start the BGP-poisoning topology, bring up Zebra/BGPd
    on routers R1..R7, and open the CLI.

    Bug fixed: the Zebra/BGPd loop previously called waitOutput() on the
    leftover `router` variable from the sysctl loop instead of the router
    actually being configured.
    """
    # clean previous run
    os.system("rm -f /tmp/*.pid logs/*")
    os.system("mn -c >/dev/null 2>&1")
    os.system("killall -9 zebra bgpd > /dev/null 2>&1")
    os.system("/etc/init.d/quagga restart")
    topo = BgpPoisoningTopo()
    net = Mininet( topo=topo )
    net.start()
    # Enable IP forwarding on every router node.
    for router in net.switches:
        router.cmd("sysctl -w net.ipv4.ip_forward=1")
        router.waitOutput()
    # Waiting (sleepytime) seconds for sysctl changes to take effect.
    sleep(sleepytime)
    for name in ['R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7']:
        node = net.get(name)
        node.initZebra("conf/zebra-%s.conf" % name)
        node.waitOutput()
        node.initBgpd("conf/bgpd-%s.conf" % name)
        node.waitOutput()
    CLI( net )
    net.stop()
def simpleTest():
    "Create and test a simple network"
    # Diamond topology with shaped (TC) links and an external controller.
    topo = DiamondTopo(k=4)
    net = Mininet(topo=topo, link=TCLink, controller=RemoteController)
    if args.sim != 1:
        # Real-interface mode: graft two physical NICs onto s1/s3 and
        # start sshd + iperf servers on h2.
        print "Adding real interfaces"
        s1 = net.getNodeByName('s1')
        s3 = net.getNodeByName('s3')
        addRealIntf(net, args.intf1, s1)
        addRealIntf(net, args.intf2, s3)
        opts = '-D -o UseDNS=no -u0'
        # NOTE(review): net.start() is only called in the `else` branch —
        # presumably sshd() starts the network in this branch; confirm.
        rootnode = sshd(net, opts=opts)
        h2 = net.getNodeByName('h2')
        h2.cmd('iperf -s -p 5001 -i 1 > iperf-recv_TCP.txt &')
        h2.cmd('iperf -s -p 5003 -u -i 1 > iperf-recv_UDP.txt &')
    else:
        net.start()
    CLI(net)
    # Cleanup: kill iperf everywhere, then tear down the real-intf extras.
    os.system('killall -9 iperf')
    if args.sim != 1:
        net.hosts[0].cmd('killall -9 dhcpd')
        for host in net.hosts:
            # '%' + path is a shell job spec for the sshd started above.
            host.cmd('kill %' + '/usr/sbin/sshd')
        stopNAT(rootnode)
    net.stop()
def output(partIdx): """Uses the student code to compute the output for test cases.""" outputString = "" if partIdx == 0: # This is agPA2 "Set up link parameters" print "a. Setting link parameters" "--- core to aggregation switches" linkopts1 = {"bw": 50, "delay": "5ms"} "--- aggregation to edge switches" linkopts2 = {"bw": 30, "delay": "10ms"} "--- edge switches to hosts" linkopts3 = {"bw": 10, "delay": "15ms"} "Creating network and run simple performance test" print "b. Creating Custom Topology" topo = CustomTopo(linkopts1, linkopts2, linkopts3, fanout=3) print "c. Firing up Mininet" net = Mininet(topo=topo, link=TCLink) net.start() h1 = net.get("h1") h27 = net.get("h27") print "d. Starting Test" # Start pings outputString = h1.cmd("ping", "-c6", h27.IP()) print "e. Stopping Mininet" net.stop() return outputString.strip()
def main(): start = time() try: topo = NetworkTopo(switch_bw=args.bw_net, host_bw=args.bw_host, switch_delay='%sms' %(args.delay, ), queue_size=23593) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink) net.start() dumpNodeConnections(net.hosts) net.pingAll() if args.http: test_http(net) else: run_tcp_first(net, args.tcp_n) except: print "-"*80 print "Caught exception. Cleaning up..." print "-"*80 import traceback traceback.print_exc() raise finally: stop_all_iperf() net.stop() Popen("killall -9 top bwm-ng tcpdump cat mnexec; mn -c", shell=True, stderr=PIPE) Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait() stop_tcpprobe() end = time() cprint("Experiment took %s seconds\n" % (end - start), "yellow")
def main():
    "Create and run experiment"
    t0 = time()
    topo = ParkingLotTopo(n=args.n)
    # 15% of system bandwidth per host
    host_cls = custom(CPULimitedHost, cpu=.15)
    link_cls = custom(TCLink, bw=args.bw, delay='1ms', max_queue_size=200)
    net = Mininet(topo=topo, host=host_cls, link=link_cls)
    net.start()
    cprint("*** Dumping network connections:", "green")
    dumpNetConnections(net)
    cprint("*** Testing connectivity", "blue")
    net.pingAll()
    if args.cli:
        # Run CLI instead of experiment
        CLI(net)
    else:
        cprint("*** Running experiment", "magenta")
        run_parkinglot_expt(net, n=args.n)
    net.stop()
    t1 = time()
    os.system("killall -9 bwm-ng")
    cprint("Experiment took %.3f seconds" % (t1 - t0), "yellow")
def setupNetwork():
    "Create network"
    # Controller-less network with auto-assigned MACs; interact via CLI.
    net = Mininet(topo=MyTopo(), autoSetMacs=True, controller=None)
    net.start()
    CLI(net)
    net.stop()
def run():
    "Create control and data networks, and invoke the CLI"
    info( '* Creating Control Network\n' )
    control_topo = ControlNetwork( n=4, dataController=DataController )
    cnet = Mininet( topo=control_topo, ipBase='192.168.123.0/24', controller=None )
    info( '* Adding Control Network Controller\n')
    cnet.addController( 'cc0', controller=Controller )
    info( '* Starting Control Network\n')
    cnet.start()
    info( '* Creating Data Network\n' )
    data_topo = TreeTopo( depth=2, fanout=2 )
    # UserSwitch so we can easily test failover
    switch_cls = partial( UserSwitch, opts='--inactivity-probe=1 --max-backoff=1' )
    net = Mininet( topo=data_topo, switch=switch_cls, controller=None )
    info( '* Adding Controllers to Data Network\n' )
    # Every controller living in the control network also controls the data net.
    for node in cnet.hosts:
        if isinstance( node, Controller ):
            net.addController( node )
    info( '* Starting Data Network\n')
    net.start()
    CLI( MininetFacade( net, cnet=cnet ) )
    info( '* Stopping Data Network\n' )
    net.stop()
    info( '* Stopping Control Network\n' )
    cnet.stop()
def multiSwitchTest(): topo = MultiSwitchTopo(depth=2, fanout=4) #net = Mininet(topo, controller=OVSController) net = Mininet(topo, controller=lambda name: RemoteController(name, ip='192.168.56.1')) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() receivers = ["00:11:22:33:44:00", "00:11:22:33:44:04", "00:11:22:33:44:08", "00:11:22:33:44:0c"] for host in net.hosts: if host.defaultIntf().MAC() in receivers: startLogReceiver(host) else: startLogSender(host) for host in net.hosts: if not (host.defaultIntf().MAC() in receivers): runGenerator(host) for host in net.hosts: if host.defaultIntf().MAC() in receivers: stopLogReceiver(host) else: stopLogSender(host) net.stop()
def sdnTopo(interface_name):
    "Two hosts on one switch under a remote (ODL) controller; graft a real NIC."
    CONTROLLER_IP = '10.0.0.200'
    net = Mininet( topo=None, build=False)
    # Create nodes
    host1 = net.addHost( 'h1', ip='10.0.0.1/8' )
    host2 = net.addHost( 'h2', ip='10.0.0.2/8' )
    # Create switch and wire the hosts to it
    sw = net.addSwitch( 's1')
    net.addLink(host1, sw, )
    net.addLink(host2, sw, )
    # Add the remote controller
    controller = net.addController( 'c0', controller=RemoteController, ip=CONTROLLER_IP)
    info( "*** Creation de l'architecture réseau\n" )
    net.build()
    # Attach the switch to its controller
    sw.start( [controller] )
    info( "*** Ajout de l'interface",interface_name,"au switch" )
    _intf = Intf( interface_name, node=sw)
    net.start()
    CLI( net )
    net.stop()
def bbnet(): "Create network and run Buffer Bloat experiment" print "starting mininet ...." # Seconds to run iperf; keep this very high seconds = 3600 start = time() # Reset to known state topo = StarTopo(n=args.n, bw_host=args.bw_host, delay='%sms' % args.delay, bw_net=args.bw_net, maxq=args.maxq, diff=args.diff) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink, autoPinCpus=True, controller=OVSController) net.start() dumpNodeConnections(net.hosts) net.pingAll() print args.diff if args.diff: print "Differentiate Traffic Between iperf and wget" os.system("bash tc_cmd_diff.sh") else: print "exec tc_cmd.sh" os.system("bash tc_cmd.sh %s" % args.maxq) sleep(2) ping_latency(net) print "Initially, the delay between two hosts is around %dms" % (int(args.delay)*2) h2 = net.getNodeByName('h2') h1 = net.getNodeByName('h1') h1.cmd('cd ./http/; nohup python2.7 ./webserver.py &') h1.cmd('cd ../') h2.cmd('iperf -s -w 16m -p 5001 -i 1 > iperf-recv.txt &') CLI( net ) h1.cmd("sudo pkill -9 -f webserver.py") h2.cmd("rm -f index.html*") Popen("killall -9 cat", shell=True).wait()
def emptyNet():
    "Create an empty network and add nodes to it."
    net = Mininet( controller=lambda a: RemoteController(a, ip='128.208.125.60' ))
    info( '*** Adding controller\n' )
    net.addController( 'c0' )
    info( '*** Adding hosts\n' )
    hosts = [net.addHost( 'h%d' % i, ip='10.0.0.%d' % i ) for i in (1, 2)]
    info( '*** Adding switch\n' )
    switches = [net.addSwitch( 's%d' % i ) for i in (3, 4, 5)]
    info( '*** Creating links\n' )
    net.addLink( hosts[0], switches[0] )     # h1 - s3
    net.addLink( hosts[1], switches[2] )     # h2 - s5
    net.addLink( switches[0], switches[1] )  # s3 - s4
    net.addLink( switches[1], switches[2] )  # s4 - s5
    info( '*** Starting network\n')
    net.start()
    info( '*** Running CLI\n' )
    CLI( net )
    info( '*** Stopping network' )
    net.stop()
def sdnnet(opt):
    """Start the SDN topology, attach taps to the core switches, address
    the hosts/roots, run sshd on the hosts, and optionally open the CLI.

    Fix: removed the unused `core_sw` list (declared but never populated).
    """
    topo = SDNTopo()
    info( '*** Creating network\n' )
    net = Mininet( topo=topo, controller=MyController, link=TCLink)
    hosts = [net.get('host%d' % i) for i in range(8)]
    net.start()
    # Attach a tap interface to each of the two core switches.
    for i in range(2):
        name_suffix = '%s' % NWID + '0c' + '%02d' % i
        net.get('sw' + name_suffix).attach('tap%s0' % NWID)
    # Data-plane addresses for the hosts.
    for i in range(8):
        hosts[i].defaultIntf().setIP('192.168.10.10%d/24' % i)
    roots = [net.get('root%d' % i) for i in range(8)]
    # Point-to-point host<->root management links (1.1.<i>.0/24).
    for i in range(8):
        hosts[i].intf('host%d-eth1' % i).setIP('1.1.%d.1/24' % i)
        roots[i].intf('root%d-eth0' % i).setIP('1.1.%d.2/24' % i)
    stopsshd()
    startsshds(hosts)
    if opt == "cli":
        CLI(net)
    stopsshd()
    net.stop()
def runMultiLink():
    "Create and run multiple link network"
    net = Mininet( topo=simpleMultiLinkTopo( n=2 ) )
    net.start()
    CLI( net )
    net.stop()
def run( n ):
    "Build an OpticalTopo of size n under a remote controller and open the CLI."
    net = Mininet( topo=OpticalTopo( n ), controller=RemoteController,
                   autoSetMacs=True )
    net.start()
    #installStaticFlows( net )
    CLI( net )
    net.stop()
def myNet():
    """Single-switch (n=14) OpenFlow 1.3 network with three remote
    controllers; static ARP, then CLI.

    Cleanup: removed the large blocks of commented-out dead code (manual
    host/switch/link creation and the random-ping loop).
    """
    MultiSwitch13 = partial( MultiSwitch, protocols='OpenFlow13' )
    tree_topo = SingleSwitchTopo(n=14)
    net = Mininet(controller=RemoteController, topo=tree_topo,
                  switch=MultiSwitch13, build=False, autoSetMacs=True)
    info( '*** Adding controllers\n')
    c1 = net.addController('c1', controller=RemoteController, ip="192.168.1.1", port=6633)
    c2 = net.addController('c2', controller=RemoteController, ip="192.168.1.2", port=6633)
    c3 = net.addController('c3', controller=RemoteController, ip="192.168.1.3", port=6633)
    info( '*** Starting network\n')
    net.build()
    info( '*** Starting controllers\n')
    c1.start()
    c2.start()
    c3.start()
    net.start()
    # Pre-populate ARP tables so the controllers never see ARP traffic.
    net.staticArp()
    CLI( net )
    net.stop()
def test():
    # Compare ping behaviour with and without virtual-time freezing.
    topo = DssTopo()
    net = Mininet(topo, link=TCLink)
    net.start()
    # Collect the PIDs of the emulated processes (fills the global pIDS).
    pidList(net)
    global NPAUSE
    global NRESUME
    # Commands that freeze / unfreeze all emulated processes.
    NPAUSE = 'sudo /home/kd/VirtualTimeKernel/test_virtual_time/freeze_all_procs -f -p %s' % pIDS
    NRESUME = 'sudo /home/kd/VirtualTimeKernel/test_virtual_time/freeze_all_procs -u -p %s' % pIDS
    # block: baseline ping, then a 40-ping run written to <FileOut>bl.test
    print(net.get('h1').cmd('ping -c 1 10.0.0.2'))
    net.get('h1').cmd('ping -c 40 10.0.0.2 > %sbl.test' % FileOut)
    # dont block: background 40-ping run with the virtual-time ping binary
    net.get('h1').cmd('/home/kd/VirtualTimeKernel/iputils/ping -c 40 -D 10.0.0.2 > %svt.test &' % FileOut)
    time.sleep(5)
    # Pause/resume cycles; count taken from the command line (argv[3]).
    for x in range(0, int(sys.argv[3])):
        print 'pausing'
        pause()
        time.sleep(stime)
        print 'resumed'
    # NOTE(review): assumed the final 30s wait (for the background ping to
    # finish) sits outside the loop — confirm against the original layout.
    time.sleep(30)
    net.stop()
def bufferbloat(**kwargs):
    """Run the bufferbloat experiment on BBTopo and print fetch-time stats.

    Expects kwargs: 'queue_size', 'ping_RTT', 'congestion_window'.
    """
    # Linux uses CUBIC-TCP by default that doesn't have the usual sawtooth
    # behaviour. For those who are curious, replace reno with cubic
    # see what happens...
    # sysctl -a | grep cong should list some interesting parameters.
    os.system("sysctl -w net.ipv4.tcp_congestion_control=reno")
    # create the topology and network
    topo = BBTopo(int(kwargs['queue_size']), int(kwargs['ping_RTT']))
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
                  controller=OVSController)
    net.start()
    # This dumps the topology and how nodes are interconnected through
    # links.
    dumpNodeConnections(net.hosts)
    # This performs a basic all pairs ping test.
    net.pingAll()
    # Start all the monitoring processes
    start_tcpprobe("cwnd.txt")
    # Monitor the queue size on the bottleneck interface of switch s0.
    # The interface numbering starts with 1; which one is the bottleneck
    # depends on the order links were added in BBTopo.
    qmon = start_qmon(iface='s0-eth2', outfile='%s/q.txt' % ".")
    # Start iperf, pings, and the webserver.
    start_iperf(net, kwargs['congestion_window'])
    start_webserver(net)
    start_ping(net)
    # Measure the time to fetch the page from h1's webserver several times
    # (cf. curl -o /dev/null -s -w %{time_total}).
    print "starting timing tester"
    timing_results = timing_tester(net)
    # Report mean and standard deviation of the fetch times.
    print timing_results[0]
    # NOTE(review): numpy.float is removed in NumPy >= 1.24 — this code
    # presumably targets an older NumPy; confirm before upgrading.
    print "Ave fetching time: %.4f" % numpy.average(numpy.array(timing_results).astype(numpy.float))
    print "std dev. of fetching times: %.4f" % numpy.std(numpy.array(timing_results).astype(numpy.float))
    # Stop probing
    stop_tcpprobe()
    qmon.terminate()
    net.stop()
    # Ensure that all processes you create within Mininet are killed.
    # Sometimes they require manual killing.
    Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def main():
    # All b/w are in megabits
    max_bw = 1000
    # B/w of queues q1..q3 (min == max for each, i.e. fixed-rate queues)
    values = [3,1,4]
    topo = MyTopo(max_bw)
    net = Mininet(topo, link = TCLink,
                  controller = partial(RemoteController, ip = '127.0.0.1', port = 6633))
    net.start()
    # Install an HTB QoS with four queues on port s2-eth4 via OVSDB.
    # NOTE(review): the original comment said "between h1 and s1", but the
    # port configured is s2-eth4 — confirm which link this is meant to shape.
    # The backslashes are line continuations *inside* the string literal.
    cmd = 'ovs-vsctl -- set Port s2-eth4 qos=@newqos -- \
--id=@newqos create QoS type=linux-htb other-config:max-rate=1000000000 queues=0=@q0,1=@q1,2=@q2,3=@q3 -- \
--id=@q0 create Queue other-config:min-rate=%d other-config:max-rate=%d -- \
--id=@q1 create Queue other-config:min-rate=%d other-config:max-rate=%d -- \
--id=@q2 create Queue other-config:min-rate=%d other-config:max-rate=%d -- \
--id=@q3 create Queue other-config:min-rate=%d other-config:max-rate=%d' % \
        (max_bw * 10**6, max_bw * 10**6, \
         values[0] * 10**6, values[0] * 10**6, \
         values[1] * 10**6, values[1] * 10**6, \
         values[2] * 10**6, values[2] * 10**6)
    sp.call(cmd, shell = True)
    CLI(net)
    net.stop()
def emptyNet():
    "Create an empty network and add nodes to it."
    net = Mininet( topo=None, build=False)
    net.addController( 'c0', controller=RemoteController, ip='0.0.0.0' )
    hosts = [net.addHost( 'h%d' % i, ip='10.0.0.%d' % i ) for i in (1, 2, 3)]
    switch = net.addSwitch( 's1', cls=OVSSwitch )
    for h in hosts:
        net.addLink( h, switch )
    net.start()
    # Give the switch's local interface an address on the data subnet.
    switch.cmd('ifconfig s1 inet 10.0.0.10')
    CLI( net )
    net.stop()
def LowMnNet(n, nn, nnn): print "############ Instanitiate a mininet of topology LowMnTopo ###############\n" topo = LowMnTopo(n, nn, nnn) ctrl_port=6633 net = Mininet(topo=topo, ipBase='10.0.0.0/8', autoSetMacs=True, host=CPULimitedHost, link=TCLink) net.start() return net
def test():
    """Measure pingPairFull RTT statistics with and without pause().

    Refactor: the two near-identical 2000-iteration loops are now a single
    helper, `_record_pings`; behavior and output files are unchanged.
    """
    topo = DssTopo()
    net = Mininet(topo, link=TCLink)
    net.start()
    _record_pings(net, "pause.test", do_pause=True)
    _record_pings(net, "noPause.test", do_pause=False)
    net.stop()


def _record_pings(net, filename, do_pause, reps=2000):
    """Append `reps` pingPairFull RTT samples (st1[0][2][3]) to `filename`,
    optionally calling pause() between iterations."""
    with open(filename, "a") as out:
        for _ in range(0, reps):
            st1 = net.pingPairFull()
            time.sleep(0.01)
            if do_pause:
                pause()
            out.write(repr(st1[0][2][3]))
            out.write('\n')
def testLinkLoss( self ):
    "Verify that we see packet drops with a high configured loss rate."
    LOSS_PERCENT = 99
    REPS = 1
    lopts = { 'loss': LOSS_PERCENT, 'use_htb': True }
    net = Mininet( topo=SingleSwitchOptionsTopo( n=N, lopts=lopts ),
                   host=CPULimitedHost, link=TCLink,
                   switch=self.switchClass, waitConnected=True )
    # Drops are probabilistic, but the chance of no dropped packets is
    # 1 in 100 million with 4 hops for a link w/99% loss.
    net.start()
    drops = sum( net.ping( timeout='1' ) for _ in range( REPS ) )
    net.stop()
    loptsStr = ', '.join( '%s: %s' % item for item in lopts.items() )
    msg = ( '\nTesting packet loss with %d%% loss rate\n'
            'number of dropped pings during mininet.ping(): %s\n'
            'expected number of dropped packets: 1\n'
            'Topo = SingleSwitchTopo, %s hosts\n'
            'Link = TCLink\n'
            'lopts = %s\n'
            'host = default\n'
            'switch = %s\n'
            % ( LOSS_PERCENT, drops, N, loptsStr, self.switchClass ) )
    self.assertGreater( drops, 0, msg )
def testCPULimits( self ):
    "Verify topology creation with CPU limits set for both schedulers."
    CPU_FRACTION = 0.1
    CPU_TOLERANCE = 0.8  # CPU fraction below which test should fail
    hopts = { 'cpu': CPU_FRACTION }
    #self.runOptionsTopoTest( N, hopts=hopts )
    mn = Mininet( SingleSwitchOptionsTopo( n=N, hopts=hopts ),
                  host=CPULimitedHost, switch=self.switchClass,
                  waitConnected=True )
    mn.start()
    results = mn.runCpuLimitTest( cpu=CPU_FRACTION )
    mn.stop()
    # Build a per-host usage summary for the failure message only.
    # NOTE(review): the slice results[ (n - 1) * 5 : (n * 5) - 1 ] looks
    # off — for n=0 it is results[-5:-1]; presumably
    # results[ n * 5 : (n + 1) * 5 ] was intended. Message-only, so
    # left untouched here; confirm before fixing.
    hostUsage = '\n'.join( 'h%s: %s' %
                           ( n + 1, results[ (n - 1) * 5 : (n * 5) - 1 ] )
                           for n in range( N ) )
    hoptsStr = ', '.join( '%s: %s' % ( opt, value )
                          for opt, value in hopts.items() )
    msg = ( '\nTesting cpu limited to %d%% of cpu per host\n'
            'cpu usage percent per host:\n%s\n'
            'Topo = SingleSwitchTopo, %s hosts\n'
            'hopts = %s\n'
            'host = CPULimitedHost\n'
            'Switch = %s\n'
            % ( CPU_FRACTION * 100, hostUsage, N, hoptsStr,
                self.switchClass ) )
    for pct in results:
        #divide cpu by 100 to convert from percentage to fraction
        self.assertWithinTolerance( pct/100, CPU_FRACTION, CPU_TOLERANCE, msg )
def perfTest():
    "Start the crazy_switches topology under a local remote controller."
    net = Mininet( topo=crazy_switches(),
                   controller=lambda name: RemoteController( 'c0', '127.0.0.1' ),
                   host=CPULimitedHost, link=TCLink )
    net.start()
    CLI( net )
    net.stop()
def run_cellsim_topology(qdisc, random_seed):
    "Bring up the cellsim topology, run the flow-request experiment, tear down."
    print_welcome_message()
    # Quietly kill leftovers from any previous run, in the original order.
    for stale in ('controller', 'cellsim', 'datagrump-sender', 'datagrump-receiver'):
        os.system('killall -q ' + stale)
    os.system( "service apache2 stop" )
    os.system( "killall -q apache2" )
    os.system( "killall -q on-off.py")
    net = Mininet(topo=ProtoTester(), host=Host, link=Link)
    net.start()
    tx = net.getNodeByName('sender')
    lte = net.getNodeByName('LTE')
    rx = net.getNodeByName('receiver')
    set_all_IP(net, tx, lte, rx)
    #Dump connections
    #dumpNodeConnections(net.hosts)
    #display_routes(net, tx, lte, rx)
    run_apache(tx)
    run_flowrequestr(rx, random_seed)
    run_cellsim(lte, qdisc)
    # CLI(net)
    net.stop()
def run_cellsim_topology():
    "Bring up the cellsim topology, run datagrump sender/receiver, tear down."
    print_welcome_message()
    # Quietly kill leftovers from any previous run, in the original order.
    for stale in ('controller', 'cellsim', 'mysender', 'myreceiver'):
        os.system('killall -q ' + stale)
    net = Mininet(topo=ProtoTester(), host=Host, link=Link)
    net.start()
    src = net.getNodeByName('sender')
    lte = net.getNodeByName('LTE')
    dst = net.getNodeByName('receiver')
    set_all_IP(net, src, lte, dst)
    #Dump connections
    #dumpNodeConnections(net.hosts)
    #display_routes(net, src, lte, dst)
    #run_cellsim(lte)
    run_datagrump(src, dst)
    run_cellsim(lte)
    # CLI(net)
    net.stop()
def demo():
    "Rogue Webdd server demonstration"
    #checkRequired()
    topo = AfterBandwidthTestHubDemo()
    net = Mininet(topo=topo,
                  controller=lambda name: RemoteController( name='c0', ip='192.168.56.104'),
                  link=TCLink, switch=OVSKernelSwitch, autoSetMacs=True)
    popens = {}
    server = net.get('h4')
    #c = net.get('c0')
    net.start()
    # (Banwidth Test After Apply Traffic Priorization Correlationed witn
    # Classification by Application Class(HTTP, DNS, FTP)) on Controller RYU
    #net.get('s1').cmd('cd qos-command &')
    #net.get('s1').cmd('sudo ./ovs-qos-run s1-eth4' )
    # (Executing OVSDB Queue on Switches): install a 4-queue HTB QoS on the
    # last interface of every switch except the last switch.
    for switch in net.switches:
        if net.switches[-1].name != switch.name:
            print switch.intfList()[-1]
            #popens[switch] = switch.popen('cd qos-command &')
            #popens[switch] = switch.popen('sudo ./ovs-qos-run ' + str(switch.intfList()[-1]) + ' &' )
            popens[switch] = switch.popen( 'ovs-vsctl set port ' + str(switch.intfList()[-1]) + ' qos=@newqos -- --id=@newqos create QoS type=linux-htb other-config:max-rate=7000000 queues=0=@q0,1=@q1,2=@q2,3=@q3 -- --id=@q0 create Queue other-config:max-rate=1000000 -- --id=@q1 create Queue other-config:min-rate=1000000 other-config:max-rate=2000000 -- --id=@q2 create Queue other-config:min-rate=3000000 other-config:max-rate=4000000 -- --id=@q3 create Queue other-config:min-rate=5000000 other-config:max-rate=6000000' )
    # (Starting up HTTP Server)
    webdir = '/tmp/webserver'
    # NOTE(review): each popen spawns an independent process, so this 'cd'
    # does not affect the next popen's working directory — confirm intent.
    popens[server] = server.popen("cd ", webdir, " &")
    # NOTE(review): '-u-m' is missing a space ('-u -m'); as written Python
    # will not run SimpleHTTPServer — confirm and fix separately.
    popens[server] = server.popen( "python -u-m SimpleHTTPServer 80 >& /tmp/http.log &")
    sleep(1)
    # (Starting up DNS Server)
    popens[server] = server.popen('dnsmasq -k -A /#/%s 1>/tmp/dns.log 2>&1 &' % server.IP())
    sleep(1)
    # (Starting up FTP Server)
    """popens[server] = server.popen("inetd") #sleep(1)"""
    # (Generate Flow by Class(HTTP, DNS, FTP)
    for client in net.hosts:
        if client.name == "h1":
            popens[client] = client.popen("wget -O - {}".format(server.IP()))
        if client.name == "h2":
            popens[client] = client.popen("nslookup 10.0.0.4")
        if client.name == "h3":
            popens[client] = client.popen('curl ftp://' + server.IP() + ' --user ubuntu:ubuntu')
    # (Stoping DNS Server)
    # NOTE(review): 'kill %name' is shell job-control syntax and has no
    # effect via popen (no controlling shell/jobs) — confirm.
    popens[server] = server.popen('kill %dnsmasq')
    # (Stoping HTTP Server)
    popens[server] = server.popen('kill %python &')
    #server.cmd('kill %python &')
    popens[server] = server.popen('iperf -s -p 5003 &')
    for client in net.hosts:
        if client.name != server.name:
            #print client
            popens[client] = client.popen('iperf -c ' + server.IP() + ' -p 5003')
    try:
        for host, line in util.pmonitor(popens):
            if host:
                print(host.name, line)
    finally:
        # Don't leave things running if this script crashes.
        # NOTE(review): poll() is None while running and 0 on clean exit;
        # 'if not process.poll()' therefore also "kills" cleanly-exited
        # processes — probably harmless but confirm.
        for process in popens.values():
            if not process.poll():
                process.kill()
    net.stop()
def goDashBedNet():
    "Create network and run experiment"
    # lets start to check on the script arguements:
    if args.serverType != "ASGI" and args.serverType != "WSGI":
        print("\n**** Incorrect Web Server has been choosen ****")
        print(
            "**** Please choose either ASGI (Hypercorn) or WSGI (Caddy and QUIC) ****\n"
        )
        sys.exit(0)
    print("preparing config files for goDASH")
    # lets read in the goDASH config file
    cwd = os.getcwd()
    config_direct = cwd + "/config"
    config_file = config_direct + "/configure.json"
    # lets read in the original config file and create a dictionary we can use
    _dict = create_dict(config_file)
    # print("How this dict looks alike: ")
    test_dict = create_dict_from_json(config_file)
    # lets create the log and config folder locations
    output_folder = cwd + output_folder_name
    print("starting mininet ....")
    # all voip clients are run one one mininet node with different ports
    if int(args.voipclients) > 0:
        num_voip = 1
    else:
        num_voip = 0
    # all voip clients will be handled by one host, plus one host storing
    # video content
    # TODO: add support for web traffic
    total_num_hosts = int(args.videoclients) + num_voip + 1
    print("Total number of host : " + str(total_num_hosts))
    # for each run
    for run in range(1, 1 + int(args.numruns)):
        trace_files = [
            "traces/" + f for f in listdir("traces/")
            if isfile(join("traces/", f))
        ]
        # for each trace file
        for trace_file in trace_files:
            # Create topology
            topo = TwoSwitchTopo(total_num_hosts=total_num_hosts)
            net = Mininet(controller=Controller, link=TCLink, topo=topo)
            net.start()
            # get voip client host - it is the last
            voip_host = net.getNodeByName('h%d' % (total_num_hosts))
            # leaverage D-ITG capabilites
            prepare_voip_clients(int(args.voipclients), voip_host,
                                 1000 * int(args.duration))
            # wait for 5 seconds
            sleep(5)
            dumpNodeConnections(net.hosts)
            #print (pid_python)
            serverHost = net.getNodeByName('h1')
            # NOTE(review): ip_address_sh is computed but never used below.
            ip_address_sh = serverHost.cmdPrint(
                "ifconfig %s-eth0 | grep inet | awk '{print $2}' | sed 's/addr://'"
                % serverHost.name).split()[0]
            # start consul
            if args.collaborative == "on":
                consul = net.getNodeByName('c1')
                #consul.cmd("consul -force-leave")
                print("starting consul")
                ttt = consul.cmd(
                    "consul agent -dev -client 10.0.0.2 > ./output/consul_log.out &"
                )
                sleep(5)
            # stop the apache server
            tt4 = serverHost.cmd("sudo systemctl stop apache2.service")
            # check to see if Caddy/Example or Hypercorn is being used
            if args.serverType == "WSGI":
                print("Calling WSGI Server...", end=" ")
                if args.transport_mode == "quic":
                    print("- QUIC enabled...")
                    tt = serverHost.cmd(
                        "sudo setcap CAP_NET_BIND_SERVICE=+eip caddy")
                    tt2 = serverHost.cmd(
                        'caddy start --config ./caddy-config/TestbedTCP/CaddyFilev2QUIC --adapter caddyfile'
                    )
                elif args.transport_mode == "tcp":
                    print("- TCP HTTPS enabled...")
                    tt = serverHost.cmd(
                        "sudo setcap CAP_NET_BIND_SERVICE=+eip caddy")
                    tt2 = serverHost.cmd(
                        'caddy start --config ./caddy-config/TestbedTCP/CaddyFilev2TCP --adapter caddyfile'
                    )
            elif args.serverType == "ASGI":
                print("Calling Hypercorn ASGI Server...", end=" ")
                tt1 = serverHost.cmd(
                    "sudo setcap CAP_NET_BIND_SERVICE=+eip hypercorn")
                if args.transport_mode == "quic":
                    print("- QUIC enabled...")
                    # print("For ASGI, please select --tm \"tcp\"\n")
                    # clean_up(voip_host)
                    # sys.exit(0)
                    tt = serverHost.cmd("hypercorn"
                                        " hypercorn_goDASHbed_quic:app &")
                elif args.transport_mode == "tcp":
                    # this permits http to https redirection - if we need it
                    # tt = serverHost.cmd(
                    #     "hypercorn"\
                    #     " --certfile ../goDASH/godash/http/certs/cert.pem"\
                    #     " --keyfile ../goDASH/godash/http/certs/key.pem"\
                    #     " --bind www.goDASHbed.org:443"\
                    #     " --insecure-bind www.goDASHbed.org:80"\
                    #     " hypercorn_goDASHbed:redirected_app &")
                    # lets do this dependent on the structure of the input urls
                    if "https" in urls[0]:
                        print("- TCP HTTPS enabled...")
                        tt = serverHost.cmd(
                            "hypercorn"
                            # " --certfile ../goDASH/godash/http/certs/cert.pem"
                            # " --keyfile ../goDASH/godash/http/certs/key.pem"
                            # " --bind www.goDASHbed.org:443"
                            " hypercorn_goDASHbed:app &")
                    else:
                        print("- TCP HTTP enabled...")
                        tt = serverHost.cmd("hypercorn"
                                            # " --bind www.goDASHbed.org:80"
                                            " hypercorn_goDASHbed:app &")
            sleep(3)
            # get ip address of server host
            s1 = net.getNodeByName('s1')
            s0 = net.getNodeByName('s0')
            print("Load bw values from trace: " + trace_file)
            if ".csv" in trace_file:
                bw_a = readCsvThr(trace_file)
            print("Setting fifo queueing discipline")
            getVersion = subprocess.Popen("bash tc_fifo.sh %s %d" %
                                          ("s1-eth1", args.bw_net),
                                          shell=True,
                                          stdout=subprocess.PIPE).stdout
            # add a fixed 10ms delay on every client-facing interface of s0
            for intf in s0.intfList()[2:]:
                getVersion2 = subprocess.Popen("bash tc_delay.sh %s %d" %
                                               (intf.name, 10),
                                               shell=True,
                                               stdout=subprocess.PIPE).stdout
            sleep(5)
            # create a folder based on date and time for each run
            current_folder = "/" + datetime.datetime.now().strftime(
                '%Y-%m-%d-%H-%M-%S')
            # - config
            config_folder = output_folder+"/R" + \
                str(run)+current_folder+config_folder_name
            # lets create the output folder structure
            if not os.path.exists(config_folder):
                os.makedirs(config_folder)
            print('output_folder: ' + output_folder)
            print('current folder: ' + current_folder)
            subfolder = output_folder + '/R' + str(
                run) + current_folder + '/voip/'
            if not os.path.exists(subfolder):
                os.system("mkdir -p %s" % subfolder)
            # start voip clients
            start_voip_clients(serverHost, voip_host, int(args.voipclients),
                               subfolder, run)
            # start the video clients and save as an array of processes
            processes = start_video_clients(args.videoclients,
                                            test_dict['adapt'], net, run,
                                            num_clients=total_num_hosts,
                                            output_folder=output_folder,
                                            current_folder=current_folder,
                                            config_folder=config_folder,
                                            dic=test_dict, cwd=cwd)
            # CLI(net)
            # lets start throttling the link
            tl = ThrottleLink()
            t = Thread(target=tl.run, args=(bw_a, ))
            t.start()
            # lets check if the client have completed
            # NOTE(review): target=video_clients_completed(processes, tl)
            # CALLS the function here (blocking) and passes its return value
            # as the thread target, instead of target=...,
            # args=(processes, tl). The blocking call is what actually waits
            # for the clients — fixing the Thread usage alone would change
            # timing; confirm intent before touching.
            vc = threading.Thread(target=video_clients_completed(
                processes, tl), daemon=True)
            vc.start()
            # once the client complete, lets stop the throttling
            tl.terminate()
            sleep(2)
            print("all godash clients have finished streaming")
            # reset the system network
            os.system(
                "tc class change dev s1-eth1 parent 1:0 classid 1:1 htb rate %fkbit ceil %fkbit"
                % (10000, 10000))
            genstats_voip_clients(serverHost, voip_host,
                                  int(args.voipclients), subfolder, run,
                                  current_folder)
            net.stop()
            if args.transport_mode == "tcp" and args.serverType == "WSGI":
                Popen("pgrep -f caddy | xargs kill -9", shell=True).wait()
                caddy_s1 = serverHost.popen("rm ./output/caddy_access.log")
            if args.transport_mode == "quic" and args.serverType == "WSGI":
                Popen("pgrep -f example | xargs kill -9", shell=True).wait()
            if args.collaborative == "on":
                # lets stop consul
                os.system("killall -9 consul")
class Topogen(object):
    "A topology test builder helper."

    CONFIG_SECTION = "topogen"

    def __init__(self, cls, modname="unnamed"):
        """
        Topogen initialization function, takes the following arguments:
        * `cls`: the topology class that is child of mininet.topo
        * `modname`: module name must be a unique name to identify logs later.
        """
        self.config = None
        self.topo = None
        self.net = None
        self.gears = {}
        self.routern = 1
        self.switchn = 1
        self.modname = modname
        self.errorsd = {}
        self.errors = ""
        self.peern = 1
        self._init_topo(cls)
        logger.info("loading topology: {}".format(self.modname))

    @staticmethod
    def _mininet_reset():
        "Reset the mininet environment"
        # Clean up the mininet environment
        os.system("mn -c > /dev/null 2>&1")

    def _init_topo(self, cls):
        """
        Initialize the topology provided by the user. The user topology class
        must call get_topogen() during build() to get the topogen object.
        """
        # Set the global variable so the test cases can access it anywhere
        set_topogen(self)

        # Test for MPLS Kernel modules available
        self.hasmpls = False
        if not topotest.module_present("mpls-router"):
            logger.info("MPLS tests will not run (missing mpls-router kernel module)")
        elif not topotest.module_present("mpls-iptunnel"):
            logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)")
        else:
            self.hasmpls = True

        # Load the default topology configurations
        self._load_config()

        # Initialize the API
        self._mininet_reset()
        cls()
        self.net = Mininet(controller=None, topo=self.topo)
        for gear in self.gears.values():
            gear.net = self.net

    def _load_config(self):
        """
        Loads the configuration file `pytest.ini` located at the root dir of
        topotests.
        """
        self.config = configparser.ConfigParser(tgen_defaults)
        pytestini_path = os.path.join(CWD, "../pytest.ini")
        self.config.read(pytestini_path)

    def add_router(self, name=None, cls=topotest.Router, **params):
        """
        Adds a new router to the topology. This function has the following
        options:
        * `name`: (optional) select the router name
        * `daemondir`: (optional) custom daemon binary directory
        * `routertype`: (optional) `quagga` or `frr`
        Returns a TopoRouter.
        """
        if name is None:
            name = "r{}".format(self.routern)
        if name in self.gears:
            raise KeyError("router already exists")

        params["frrdir"] = self.config.get(self.CONFIG_SECTION, "frrdir")
        params["quaggadir"] = self.config.get(self.CONFIG_SECTION, "quaggadir")
        params["memleak_path"] = self.config.get(self.CONFIG_SECTION, "memleak_path")
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if "routertype" not in params:
            params["routertype"] = self.config.get(self.CONFIG_SECTION, "routertype")

        self.gears[name] = TopoRouter(self, cls, name, **params)
        self.routern += 1
        return self.gears[name]

    def add_switch(self, name=None, cls=topotest.LegacySwitch):
        """
        Adds a new switch to the topology. This function has the following
        options:
        * `name`: (optional) select the switch name
        Returns the switch name and number.
        """
        if name is None:
            name = "s{}".format(self.switchn)
        if name in self.gears:
            raise KeyError("switch already exists")

        self.gears[name] = TopoSwitch(self, cls, name)
        self.switchn += 1
        return self.gears[name]

    def add_exabgp_peer(self, name, ip, defaultRoute):
        """
        Adds a new ExaBGP peer to the topology. This function has the
        following parameters:
        * `name`: (optional, generated when None) the peer name
        * `ip`: the peer address (e.g. '1.2.3.4/24')
        * `defaultRoute`: the peer default route (e.g. 'via 1.2.3.1')
        """
        if name is None:
            name = "peer{}".format(self.peern)
        if name in self.gears:
            raise KeyError("exabgp peer already exists")

        self.gears[name] = TopoExaBGP(self, name, ip=ip, defaultRoute=defaultRoute)
        self.peern += 1
        return self.gears[name]

    def add_link(self, node1, node2, ifname1=None, ifname2=None):
        """
        Creates a connection between node1 and node2. The nodes can be the
        following:
        * TopoGear
          * TopoRouter
          * TopoSwitch
        """
        if not isinstance(node1, TopoGear):
            raise ValueError("invalid node1 type")
        if not isinstance(node2, TopoGear):
            raise ValueError("invalid node2 type")

        # Auto-allocate interface names when the caller did not pick them.
        if ifname1 is None:
            ifname1 = node1.new_link()
        if ifname2 is None:
            ifname2 = node2.new_link()

        # Register both link endpoints so each gear knows its neighbor.
        node1.register_link(ifname1, node2, ifname2)
        node2.register_link(ifname2, node1, ifname1)
        self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2)

    def get_gears(self, geartype):
        """
        Returns a dictionary of all gears of type `geartype`.

        Normal usage:
        * Dictionary iteration:
        ```py
        tgen = get_topogen()
        router_dict = tgen.get_gears(TopoRouter)
        for router_name, router in router_dict.items():
            # Do stuff
        ```
        * List iteration:
        ```py
        tgen = get_topogen()
        peer_list = tgen.get_gears(TopoExaBGP).values()
        for peer in peer_list:
            # Do stuff
        ```
        """
        # Python 3: dict comprehension over items() (iteritems() is gone).
        return {
            name: gear
            for name, gear in self.gears.items()
            if isinstance(gear, geartype)
        }

    def routers(self):
        """
        Returns the router dictionary (key is the router name and value is the
        router object itself).
        """
        return self.get_gears(TopoRouter)

    def exabgp_peers(self):
        """
        Returns the exabgp peer dictionary (key is the peer name and value is
        the peer object itself).
        """
        return self.get_gears(TopoExaBGP)

    def start_topology(self, log_level=None):
        """
        Starts the topology class. Possible `log_level`s are:
        'debug': all information possible
        'info': informational messages
        'output': default logging level defined by Mininet
        'warning': only warning, error and critical messages
        'error': only error and critical messages
        'critical': only critical messages
        """
        # If log_level is not specified use the configuration.
        if log_level is None:
            log_level = self.config.get(self.CONFIG_SECTION, "verbosity")

        # Set python logger level
        logger_config.set_log_level(log_level)

        # Run mininet
        if log_level == "debug":
            setLogLevel(log_level)

        logger.info("starting topology: {}".format(self.modname))
        self.net.start()

    def start_router(self, router=None):
        """
        Call the router startRouter method. If no router is specified it is
        called for all registred routers.
        """
        if router is None:
            # pylint: disable=r1704
            # Python 3: iteritems() was removed, use items().
            for _, router in self.routers().items():
                router.start()
        else:
            if isinstance(router, str):
                router = self.gears[router]
            router.start()

    def stop_topology(self):
        """
        Stops the network topology. This function will call the stop() function
        of all gears before calling the mininet stop function, so they can have
        their opportunity to do a graceful shutdown. stop() is called twice.
        The first is a simple kill with no sleep, the second will sleep if not
        killed and try with a different signal.
        """
        logger.info("stopping topology: {}".format(self.modname))
        errors = ""
        for gear in self.gears.values():
            errors += gear.stop()
        if len(errors) > 0:
            # Fail loudly: the original `assert "<msg>" == 0` was an obscured
            # always-false assertion; make the intent (and the header) explicit.
            assert False, "Errors found post shutdown - details follow:\n" + errors

        self.net.stop()

    def mininet_cli(self):
        """
        Interrupt the test and call the command line interface for manual
        inspection. Should be only used on non production code.
        """
        if not sys.stdin.isatty():
            raise EnvironmentError(
                "you must run pytest with '-s' in order to use mininet CLI"
            )
        CLI(self.net)

    def is_memleak_enabled(self):
        "Returns `True` if memory leak report is enable, otherwise `False`."
        # On router failure we can't run the memory leak test
        if self.routers_have_failure():
            return False

        memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get(
            self.CONFIG_SECTION, "memleak_path"
        )
        if memleak_file is None:
            return False
        return True

    def report_memory_leaks(self, testname=None):
        "Run memory leak test and reports."
        if not self.is_memleak_enabled():
            return

        # If no name was specified, use the test module name
        if testname is None:
            testname = self.modname

        router_list = self.routers().values()
        for router in router_list:
            router.report_memory_leaks(self.modname)

    def set_error(self, message, code=None):
        "Sets an error message and signal other tests to skip."
        logger.info(message)

        # If no code is defined use a sequential number
        if code is None:
            code = len(self.errorsd)

        self.errorsd[code] = message
        self.errors += "\n{}: {}".format(code, message)

    def has_errors(self):
        "Returns whether errors exist or not."
        return len(self.errorsd) > 0

    def routers_have_failure(self):
        "Runs an assertion to make sure that all routers are running."
        if self.has_errors():
            return True

        errors = ""
        router_list = self.routers().values()
        for router in router_list:
            result = router.check_router_running()
            if result != "":
                errors += result + "\n"

        if errors != "":
            self.set_error(errors, "router_error")
            # The assert raises before this return is ever reached; kept so the
            # success/failure paths stay explicit.
            assert False, errors
            return True
        return False
def createNetwork():
    """Build a two-LAN testbed joined by a lossy router-to-router link.

    LAN 1 (10.10.0.0/24): hosts ht, hu and router rh behind switch sw1.
    LAN 2 (10.20.0.0/24): hosts it, iu and router ri behind switch sw2.
    rh and ri are directly linked (10.12.0.0/24); `loss`% is applied on rh's
    side of that link and `dly` on ri's interfaces. Starts the net, wires up
    routes, launches the external experiment via start_nodes(), then drops
    into the Mininet CLI and stops the net on exit.
    """
    # send rate at each link in Mbps
    bwg = 1  # in Mbps
    bwbn = 1  # in Mbps
    loss = 8  # in %
    mqs = 100  # max queue size of interfaces
    dly = '2.5ms'
    # create empty network; TCIntf makes every interface traffic-shapable
    net = Mininet(intf=TCIntf)
    info( '\n*** Adding controller\n' )
    net.addController( 'c0' )  # is it ok ?
    # add hosts to topology (two end hosts + one router host per LAN)
    ht = net.addHost( 'ht', ip='10.10.0.1/24' )
    hu = net.addHost( 'hu', ip='10.10.0.2/24' )
    it = net.addHost( 'it', ip='10.20.0.1/24' )
    iu = net.addHost( 'iu', ip='10.20.0.2/24' )
    rh = net.addHost('rh', ip='10.10.0.10/24')
    ri = net.addHost('ri', ip='10.20.0.20/24')
    info('\n** Adding Switches\n')
    # Adding 2 switches to the network, one per LAN
    sw1 = net.addSwitch('sw1')
    sw2 = net.addSwitch('sw2')
    info('\n** Creating Links \n')
    # create links between the network elements
    link_ht_sw1 = net.addLink( ht, sw1)
    link_hu_sw1 = net.addLink( hu, sw1)
    link_rh_sw1 = net.addLink( rh, sw1, intfName1='rh-eth0')
    link_it_sw2 = net.addLink( it, sw2)
    link_iu_sw2 = net.addLink( iu, sw2)
    link_ri_sw2 = net.addLink( ri, sw2, intfName1='ri-eth0')
    link_rh_ri = net.addLink( rh, ri, intfName1='rh-eth1', intfName2='ri-eth1')
    # set bandwidth on the host side (intf1) of every link
    link_ht_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    link_hu_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    # max_queue_size is hardcoded low to prevent bufferbloat / too high queuing delays
    link_rh_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    link_it_sw2.intf1.config( bw = bwg, max_queue_size = mqs)
    link_iu_sw2.intf1.config( bw = bwg, max_queue_size = mqs)
    link_ri_sw2.intf1.config( bw = bwg, max_queue_size = mqs, delay=dly)  # delay is set at ri on both interfaces
    link_rh_ri.intf1.config( bw = bwg, max_queue_size = mqs, loss=loss)  # loss is set at rh on its interface to ri only
    # and the switch/peer side (intf2) of every link
    link_ht_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_hu_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_rh_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_it_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_iu_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_ri_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_rh_ri.intf2.config( bw = bwg, max_queue_size = mqs, delay=dly)  # delay is set at ri on both interfaces
    net.start()
    info( '\n*** Configuring hosts\n' )
    # reconfiguring multi-interface hosts to prevent mininet strange initialisation behaviors
    rh.cmd('ifconfig rh-eth1 10.12.0.10 netmask 255.255.255.0')
    rh.cmd('ifconfig rh-eth0 10.10.0.10 netmask 255.255.255.0')
    rh.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')  # enable forwarding at routers
    # same reconfiguration on the other router
    ri.cmd('ifconfig ri-eth1 10.12.0.20 netmask 255.255.255.0')
    ri.cmd('ifconfig ri-eth0 10.20.0.20 netmask 255.255.255.0')
    ri.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')  # enable forwarding at routers
    # configure host default gateways
    ht.cmd('ip route add default via 10.10.0.10')
    hu.cmd('ip route add default via 10.10.0.10')
    it.cmd('ip route add default via 10.20.0.20')
    iu.cmd('ip route add default via 10.20.0.20')
    # configure router routing tables
    rh.cmd('ip route add default via 10.12.0.20')
    ri.cmd('ip route add default via 10.12.0.10')
    # weiyu: pre-create capture files and clear rh's egress qdisc
    iu.cmd('touch server.pcap')
    hu.cmd('touch client.pcap')
    rh.cmd('tc qdisc del dev rh-eth1 root')
    # experiment actions (start_nodes is defined elsewhere in this file)
    start_nodes(rh, ri, iu, hu, mqs)
    # disable TSO on TCP on default TCP sender; needs to be done on other
    # hosts too if sending large TCP files from other nodes
    it.cmd('ethtool -K it-eth0 tx off sg off tso off')
    time.sleep(1)
    # Enable the mininet> prompt if uncommented
    info('\n*** Running CLI\n')
    CLI(net)
    # stops the simulation
    net.stop()
class Experimento:
    """A single Mininet experiment run.

    Attributes
    ----------
    net : the Mininet network (built lazily by configureParams)
    inputs : the experimental unit providing controller and topology
    trafico : the traffic generator (normal or attack)

    Methods
    -------
    configureParams, getUnidadExperimental, configurarTrafico,
    killTest, killController, startTest, endTest, startCLI, pingAllTest
    """

    def __init__(self):
        # Nothing is wired up until configureParams() is called.
        self.net = None
        self.inputs = None
        self.trafico = None

    def configureParams(self, ue):
        """Store the experimental unit and build the network it describes."""
        self.inputs = ue
        self.net = Mininet(controller=ue.getController(),
                           switch=OVSSwitch,
                           build=False,
                           link=TCLink,
                           topo=ue.getTopo())
        # Give the controller time to come up before building the network.
        sleep(5)
        self.net.build()

    def getUnidadExperimental(self):
        """Return the experimental unit passed to configureParams."""
        return self.inputs

    def configurarTrafico(self, tipo='normal'):
        """Prepare the traffic generator; `tipo` is 'normal' or 'ataque'."""
        key_nodes = self.inputs.obtenerNodosClaves()
        if tipo == 'normal':
            client = self.net.get(key_nodes[1])
            victim = self.net.get(key_nodes[2])
            self.trafico = TraficoNormal(client, victim)
        elif tipo == 'ataque':
            attacker = self.net.get(key_nodes[0])
            client = self.net.get(key_nodes[1])
            victim = self.net.get(key_nodes[2])
            self.trafico = TraficoAtaque(attacker, client, victim)

    def killTest(self):
        """Clean any leftover Mininet state from previous runs."""
        subprocess.call(["mn", "-c"])

    def killController(self, port=6653):
        """Kill whatever process is bound to the controller's TCP port."""
        subprocess.Popen(['sudo', 'fuser', '-k', str(port) + '/tcp'])

    def startTest(self):
        """Start the previously built network."""
        self.net.start()

    def endTest(self):
        """Tear the network down."""
        self.net.stop()

    def startCLI(self):
        """Drop into the interactive Mininet CLI."""
        CLI(self.net)

    def pingAllTest(self):
        """Run an all-pairs ping over the network."""
        self.net.pingAll()
def setupTopology(controller_addr):
    """Create and run the multiple-link test network.

    Builds 10 hosts and 3 switches, makes h8 a NAT router towards a fake
    'internet' (h9/h10 on 203.0.113.0/24), points every LAN host's default
    route at 10.0.0.8, and optionally starts demo HTTP servers.

    * `controller_addr`: IP of the remote OpenFlow controller.
    Populates the module-level globals net, c1, s1-s3, h1-h10 and `hosts`.
    """
    global net, c1, s1, s2, s3
    global h1, h2, h3, h4, h5, h6, h7, h8, h9, h10
    net = Mininet(controller=RemoteController)
    # print(...) is valid in both Python 2 (parenthesized expr) and Python 3.
    print("mininet created")
    c1 = net.addController('c1', ip=controller_addr, port=6653)

    # h1: IOT Device.
    # h2: Static DHCPD.
    # h3: router / NAT.
    # h4: Non IOT device.
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    h3 = net.addHost('h3')
    h4 = net.addHost('h4')
    h5 = net.addHost('h5')
    h6 = net.addHost('h6')
    h7 = net.addHost('h7')
    h8 = net.addHost('h8')
    h9 = net.addHost('h9')
    h10 = net.addHost('h10')
    hosts.append(h1)
    hosts.append(h2)
    hosts.append(h3)
    hosts.append(h4)
    hosts.append(h5)
    hosts.append(h6)
    hosts.append(h7)
    hosts.append(h8)
    hosts.append(h9)
    hosts.append(h10)

    s2 = net.addSwitch('s2', dpid="2")
    s3 = net.addSwitch('s3', dpid="3")
    s1 = net.addSwitch('s1', dpid="1")

    # h1-h7 hang off s1; h8 (the NAT) bridges s2 and s3; h9/h10 are on s3.
    s1.linkTo(h1)
    s1.linkTo(h2)
    s1.linkTo(h3)
    s1.linkTo(h4)
    s1.linkTo(h5)
    s1.linkTo(h6)
    s1.linkTo(h7)
    s2.linkTo(h8)
    s3.linkTo(h8)
    s3.linkTo(h9)
    s3.linkTo(h10)

    # S2 is the NPE switch.
    # Direct link between S1 and S2
    s1.linkTo(s2)

    # Reset h8's forwarding and flush old iptables rules before configuring NAT.
    h8.cmdPrint('echo 0 > /proc/sys/net/ipv4/ip_forward')
    h8.cmdPrint('iptables -F')
    h8.cmdPrint('iptables -t nat -F')
    h8.cmdPrint('iptables -t mangle -F')
    h8.cmdPrint('iptables -X')
    h8.cmdPrint('echo 1 > /proc/sys/net/ipv4/ip_forward')

    # Set up iptables to forward as NAT
    h8.cmdPrint(
        'iptables -t nat -A POSTROUTING -o h8-eth1 -s 10.0.0.0/24 -j MASQUERADE'
    )

    # BUG FIX: net.build() was called twice; once is enough.
    net.build()
    c1.start()
    s1.start([c1])
    s2.start([c1])
    s3.start([c1])
    net.start()

    # Assign deterministic MACs so controller rules survive re-runs.
    h1.setMAC("00:00:00:00:00:01", "h1-eth0")
    h2.setMAC("00:00:00:00:00:02", "h2-eth0")
    h3.setMAC("00:00:00:00:00:03", "h3-eth0")
    h4.setMAC("00:00:00:00:00:04", "h4-eth0")
    h5.setMAC("00:00:00:00:00:05", "h5-eth0")
    h6.setMAC("00:00:00:00:00:06", "h6-eth0")
    h7.setMAC("00:00:00:00:00:07", "h7-eth0")
    h8.setMAC("00:00:00:00:00:08", "h8-eth0")
    h9.setMAC("00:00:00:00:00:09", "h9-eth0")
    h10.setMAC("00:00:00:00:00:10", "h10-eth0")

    # Route every LAN host (h1-h7) via the NAT at 10.0.0.8 (h8).
    h1.cmdPrint('ip route del default')
    h1.cmdPrint('ip route add default via 10.0.0.8 dev h1-eth0')
    h2.cmdPrint('ip route del default')
    h2.cmdPrint('ip route add default via 10.0.0.8 dev h2-eth0')
    h3.cmdPrint('ip route del default')
    h3.cmdPrint('ip route add default via 10.0.0.8 dev h3-eth0')
    h4.cmdPrint('ip route del default')
    h4.cmdPrint('ip route add default via 10.0.0.8 dev h4-eth0')
    h5.cmdPrint('ip route del default')
    h5.cmdPrint('ip route add default via 10.0.0.8 dev h5-eth0')
    # h6 is a localhost.
    h6.cmdPrint('ip route del default')
    h6.cmdPrint('ip route add default via 10.0.0.8 dev h6-eth0')
    # The IDS runs on h7.
    h7.cmdPrint('ip route del default')
    h7.cmdPrint('ip route add default via 10.0.0.8 dev h7-eth0')

    # h9 is our fake host. It runs our "internet" web server.
    h9.cmdPrint('ifconfig h9-eth0 203.0.113.13 netmask 255.255.255.0')
    # h10 is our second fake host.
    # BUG FIX: this comment continuation was missing its '#' (SyntaxError):
    # it runs another internet web server that we cannot reach.
    h10.cmdPrint('ifconfig h10-eth0 203.0.113.14 netmask 255.255.255.0')

    # Start dnsmasq (our dns server).
    h5.cmdPrint(
        '/usr/sbin/dnsmasq --server 10.0.4.3 --pid-file=/tmp/dnsmasq.pid')

    # Set up our router routes.
    h8.cmdPrint('ip route add 203.0.113.13/32 dev h8-eth1')
    h8.cmdPrint('ip route add 203.0.113.14/32 dev h8-eth1')
    h8.cmdPrint('ifconfig h8-eth1 203.0.113.1 netmask 255.255.255.0')

    # Only launch the demo HTTP servers outside of unit tests.
    unittest_flag = os.environ.get("UNITTEST")
    if unittest_flag is None or unittest_flag == '0':
        h9.cmdPrint("python -m SimpleHTTPServer 80&")
        h4.cmdPrint("python -m SimpleHTTPServer 80&")
        #h3.cmdPrint("python ../util/udpping.py --port 8008 --server &")
        #h2.cmdPrint("python ../util/udpping.py --port 8008 --server &")
        #h3.cmdPrint("python ../util/tcp-server.py -P 8010 -H 10.0.0.3 -T 10000 -C&")

    print("*********** System ready *********")
def run():
    """Build the tower topology, start it, hand over to the CLI, then stop."""
    tower_net = Mininet(topo=TowerTopo(),
                        controller=RemoteController,
                        autoSetMacs=True)
    tower_net.start()
    CLI(tower_net)
    tower_net.stop()
class Solar(object):
    """ Create a tiered topology from semi-scratch in Mininet """

    def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3, edges=2, hosts=2):
        """Create tower topology for mininet"""
        # We are creating the controller with local-loopback on purpose to avoid
        # having the switches connect immediately. Instead, we'll set controller
        # explicitly for each switch after configuring it as we want.
        self.ctrls = [RemoteController(cname, cip, 6653) for cip in cips]
        self.net = Mininet(controller=RemoteController,
                           switch=OVSKernelSwitch,
                           build=False)
        self.cips = cips
        self.spines = []
        self.leaves = []
        self.hosts = []
        for controller in self.ctrls:
            self.net.addController(controller)

        # Two core switches, cross-linked in both directions.
        core_a = self.net.addSwitch('c1', dpid='1111000000000000')
        core_b = self.net.addSwitch('c2', dpid='2222000000000000')
        self.spines.append(core_a)
        self.spines.append(core_b)
        self.net.addLink(core_a, core_b)
        self.net.addLink(core_b, core_a)

        # Build each island's spine pair and attach it to both cores.
        for island in range(1, islands + 1):
            clump = self.createSpineClump(island, edges, hosts)
            self.net.addLink(core_a, clump[0])
            self.net.addLink(core_b, clump[0])
            self.net.addLink(core_a, clump[1])
            self.net.addLink(core_b, clump[1])

    def createSpineClump(self, island, edges, hosts):
        """ Creates a clump of spine and edge switches with hosts"""
        spine_a = self.net.addSwitch('s%1d1' % island,
                                     dpid='00000%1d0100000000' % island)
        spine_b = self.net.addSwitch('s%1d2' % island,
                                     dpid='00000%1d0200000000' % island)
        # Spines are cross-linked in both directions, like the cores.
        self.net.addLink(spine_a, spine_b)
        self.net.addLink(spine_b, spine_a)
        for edge_index in range(1, edges + 1):
            edge_switch = self.createEdgeSwitch(island, edge_index, hosts)
            self.net.addLink(edge_switch, spine_a)
            self.net.addLink(edge_switch, spine_b)
        self.spines.append(spine_a)
        self.spines.append(spine_b)
        return [spine_a, spine_b]

    def createEdgeSwitch(self, island, index, hosts):
        """ Creates an edge switch in an island and adds hosts to it"""
        edge = self.net.addSwitch('e%1d%1d' % (island, index),
                                  dpid='0000000%1d0000000%1d' % (island, index))
        self.leaves.append(edge)
        for host_num in range(1, hosts + 1):
            host = self.net.addHost('h%d%d%d' % (island, index, host_num),
                                    ip='10.%d.%d.%d' % (island, index, host_num))
            self.net.addLink(host, edge)
            self.hosts.append(host)
        return edge

    def run(self):
        """ Runs the created network topology and launches mininet cli"""
        self.net.build()
        self.net.start()
        CustomCLI(self.net)
        self.net.stop()

    def pingAll(self):
        """ PingAll to create flows - for unit testing """
        self.net.pingAll()

    def stop(self):
        "Stops the topology. You should call this after run_silent"
        self.net.stop()
def createNetwork():
    """Build the low-bandwidth/high-loss variant of the two-LAN testbed.

    Same topology as the other createNetwork(): ht/hu/rh behind sw1
    (10.10.0.0/24), it/iu/ri behind sw2 (10.20.0.0/24), rh<->ri direct link
    (10.12.0.0/24). This variant applies a Gilbert-Elliott loss model on
    rh-eth1 via netem, launches queue monitors, tshark captures and
    client/server scripts in xterms, then drops into the Mininet CLI.

    NOTE(review): unlike the sibling variant, net.stop() is never called
    after the CLI exits — confirm whether that is intentional.
    """
    # send rate at each link in Mbps (older values kept in trailing comments)
    bwg = 0.1   # 000 #1000 #in Mbps
    bwbn = 0.1  # 000 #1000 #25 #in Mbps
    loss = 80   # 1 #2.5 #10 #1 #in %
    mqs = 100   # 0 #1000 #max queue size of interfaces
    dly = '2.5ms'  # '2.5ms 0.5ms' #'1ms 0.5ms' #can take all tc qdisc delay distribution formulations
    # create empty network; TCIntf makes every interface traffic-shapable
    net = Mininet(intf=TCIntf)
    info( '\n*** Adding controller\n' )
    net.addController( 'c0' )  # is it ok ?
    # add hosts to topology (two end hosts + one router host per LAN)
    ht = net.addHost( 'ht', ip='10.10.0.1/24' )
    hu = net.addHost( 'hu', ip='10.10.0.2/24' )
    it = net.addHost( 'it', ip='10.20.0.1/24' )
    iu = net.addHost( 'iu', ip='10.20.0.2/24' )
    rh = net.addHost('rh', ip='10.10.0.10/24')
    ri = net.addHost('ri', ip='10.20.0.20/24')
    info('\n** Adding Switches\n')
    # Adding 2 switches to the network, one per LAN
    sw1 = net.addSwitch('sw1')
    sw2 = net.addSwitch('sw2')
    info('\n** Creating Links \n')
    # create links between the network elements
    link_ht_sw1 = net.addLink( ht, sw1)
    link_hu_sw1 = net.addLink( hu, sw1)
    link_rh_sw1 = net.addLink( rh, sw1, intfName1='rh-eth0')
    link_it_sw2 = net.addLink( it, sw2)
    link_iu_sw2 = net.addLink( iu, sw2)
    link_ri_sw2 = net.addLink( ri, sw2, intfName1='ri-eth0')
    link_rh_ri = net.addLink( rh, ri, intfName1='rh-eth1', intfName2='ri-eth1')
    # set bandwidth on the host side (intf1) of every link
    link_ht_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    link_hu_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    # max_queue_size is hardcoded low to prevent bufferbloat / too high queuing delays
    link_rh_sw1.intf1.config( bw = bwbn, max_queue_size = mqs)
    link_it_sw2.intf1.config( bw = bwg, max_queue_size = mqs)
    link_iu_sw2.intf1.config( bw = bwg, max_queue_size = mqs)
    link_ri_sw2.intf1.config( bw = bwg, max_queue_size = mqs, delay=dly)  # delay is set at ri on both interfaces
    # link_rh_ri.intf1.config( bw = bwg, max_queue_size = 10, loss=loss) #loss is set at rh on its interface to ri only
    link_rh_ri.intf1.config( bw = bwg, max_queue_size = mqs, loss=loss)  # loss is set at rh on its interface to ri only
    # and the switch/peer side (intf2) of every link
    link_ht_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_hu_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_rh_sw1.intf2.config( bw = bwbn, max_queue_size = mqs)
    link_it_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_iu_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_ri_sw2.intf2.config( bw = bwg, max_queue_size = mqs)
    link_rh_ri.intf2.config( bw = bwg, max_queue_size = mqs, delay=dly)  # delay is set at ri on both interfaces
    net.start()
    info( '\n*** Configuring hosts\n' )
    # reconfiguring multi-interface hosts to prevent mininet strange initialisation behaviors
    rh.cmd('ifconfig rh-eth1 10.12.0.10 netmask 255.255.255.0')
    rh.cmd('ifconfig rh-eth0 10.10.0.10 netmask 255.255.255.0')
    rh.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')  # enable forwarding at routers
    # same reconfiguration on the other router
    ri.cmd('ifconfig ri-eth1 10.12.0.20 netmask 255.255.255.0')
    ri.cmd('ifconfig ri-eth0 10.20.0.20 netmask 255.255.255.0')
    ri.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')  # enable forwarding at routers
    # configure host default gateways
    ht.cmd('ip route add default via 10.10.0.10')
    hu.cmd('ip route add default via 10.10.0.10')
    it.cmd('ip route add default via 10.20.0.20')
    iu.cmd('ip route add default via 10.20.0.20')
    # configure router routing tables
    rh.cmd('ip route add default via 10.12.0.20')
    ri.cmd('ip route add default via 10.12.0.10')
    # weiyu: pre-create capture files and replace rh's egress qdisc with a
    # Gilbert-Elliott (gemodel) bursty-loss netem discipline
    iu.cmd('touch server.pcap')
    hu.cmd('touch client.pcap')
    rh.cmd('tc qdisc del dev rh-eth1 root')
    rh.cmd('tc qdisc add dev rh-eth1 root netem loss gemodel 0.2% 2% 90% 2% limit ' + str(mqs))
    # earlier gemodel parameter sets, kept for reference:
    #rh.cmd('tc qdisc add dev rh-eth1 root netem loss gemodel 0.2% 2% 90% 2% limit 10')
    #rh.cmd('tc qdisc add dev rh-eth1 root netem loss gemodel 0.1% 1% 90% 2% limit 1000')
    #rh.cmd('tc qdisc add dev rh-eth1 root netem loss gemodel 0.5% 2% 90% 2% limit 1000')
    # queue monitors, one xterm per router
    # rh.cmd('python ./monitor_qlen_rh.py &')
    rh.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T rh -e \'sudo python ./monitor_queue.py\' &')
    # ri.cmd('python ./monitor_qlen_ri.py &')
    ri.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T ri -e \'sudo python ./monitor_qlen_ri.py\' &')
    #it.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T it -e \'sudo ./tcpserver 6666 > tcp-output-server.txt\' &')
    #ht.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T ht -e \'sleep 10; sudo ./tcpclient 10.20.0.1 6666 > tcp-output-client.txt\' &')
    # packet capture + server script on iu
    # iu.cmd('tshark -i iu-eth0 -w server.pcap &')
    iu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T iu -e \'sudo tshark -i iu-eth0 -w server.pcap\' &')
    # iu.cmd('./server.sh &')
    iu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T iu -e \'sudo ./server.sh > output-server.txt\' &')
    # iu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T iu -e \'python3 udp_server.py | tee udp-output-server.txt\' &')
    # packet capture + client script on hu (client waits 5s for the server)
    # hu.cmd('tshark -i hu-eth0 -w client.pcap &')
    hu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T hu -e \'sudo tshark -i hu-eth0 -w client.pcap\' &')
    # hu.cmd('./client.sh &')
    hu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T hu -e \'sleep 5; sudo ./client.sh > output-client.txt\' &')
    # hu.cmd('xterm -xrm \'XTerm.vt100.allowTitleOps: false\' -T hu -e \'python3 udp_client.py | tee udp-output-client.txt \' &')
    # disable TSO on TCP on default TCP sender; needs to be done on other
    # hosts too if sending large TCP files from other nodes
    it.cmd('ethtool -K it-eth0 tx off sg off tso off')
    method = 'tcp'  # case selector variable for the flow used by smart-grid; 'udp' = FRED
    logFolder = "../Estimations/wifiTer/"+ method + "/"  # folder where log files and metrics will be saved
    # timeout = 10 #duration of test
    #if not os.path.exists(logFolder):
    try:
        # error if folder already exists, in order to prevent accidental overwrite
        os.makedirs(logFolder)
    # NOTE(review): bare except also hides errors other than "already exists"
    except:
        print("File already exists.")
    # makeTerms([iu, hu, rh, ri], "host")
    #hu.cmd("bash /home/lca2/Desktop/server.sh")
    time.sleep(1)
    #iu.cmd("bash /home/lca2/Desktop/client-network.sh")
    time.sleep(1)
    # Disabled experiment driver kept verbatim as a string (references a
    # `timeout` variable that is commented out above — define it before
    # re-enabling this code).
    """it.cmd("python3 tcpserver.py &> "+logFolder+"it.txt &")
    time.sleep(1)
    ht.cmd("python3 tcpclient.py --ip 10.20.0.1 --port 4242 -s "+logFolder+"ht- -t "+str(timeout)+" &> "+logFolder+"ht.txt &")
    #potential second flow in the reverse direction of the first
    #ht.cmd("python3 tcpserver.py --ip 10.10.0.1 --port 4243 &> "+logFolder+"ht2.txt &")
    #time.sleep(1)
    #it.cmd("python3 tcpclient.py --ip 10.10.0.1 --port 4243 -s "+logFolder+"it2- -t "+str(timeout)+" &> "+logFolder+"it2.txt &")
    #smart grid data will be transported by TCP, delay will be recorded
    if method == 'tcp':
        info(method)
        iu.cmd("python3 delayReceiver.py --tcp --ip 10.20.0.2 -p 4242 -s "+logFolder+"iu- -t "+str(timeout)+" &> "+logFolder+"iu.txt &")
        time.sleep(1)
        hu.cmd("python3 tcpsender.py -t "+str(timeout)+" &> "+logFolder+"hu.txt &")
    #smart grid data will be transported by FRED, delay will be recorded
    elif method == 'udp':
        info(method)
        iu.cmd("python3 delayReceiver.py --ip 10.20.0.2 -p 4242 -s "+logFolder+"iu- -t "+str(timeout)+" &> "+logFolder+"iu.txt &")
        time.sleep(1)
        hu.cmd("python3 udpsender.py -s "+logFolder+"hu- -t "+str(timeout)+" &> "+logFolder+"hu.txt &")
    else:
        info("method unknown")
        net.stop()
        return
    #wainting until test end
    info('\n*** Sleeping\n')
    for i in range(int(timeout)):
        time.sleep(60)
        info("**slept "+str(i+1))"""
    # Enable the mininet> prompt if uncommented
    info('\n*** Running CLI\n')
    CLI(net)
    # kill xterms in case some were opened
    #ht.cmd("killall xterm")
    #it.cmd("killall xterm")
    # hu.cmd("killall xterm")
    iu.cmd("killall xterm")
def creatNetwork(self):
    """Build and start a 5-switch linear Mininet of OpenFlow 1.3 OVS switches.

    Returns the started Mininet instance.
    """
    of13_switch = partial(OVSSwitch, protocols='OpenFlow13')
    linear_net = Mininet(topo=LinearTopo(k=5), switch=of13_switch)
    linear_net.start()
    return linear_net
def emptyNet():
    """Create a hybrid Mininet/ns-3 network: two wired host/switch pairs plus
    an underwater acoustic (UAN) segment linking h4, s1 and s2.

    All nodes get ns-3 constant-position mobility models (placed underwater at
    z=-10), then the Mininet and opennet simulations are started and the CLI
    is entered. Commented-out h3/s3 lines are an optional third branch.
    """
    # ns-3 debug logging toggles, kept for troubleshooting:
    #ns.core.LogComponentEnable("UanChannel", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("UanHelper", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("UanNetDevice", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("UanPhyGen", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("TagBuffer", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("TapBridge", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("TapBridgeHelper", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("TapFdNetDeviceHelper", ns.core.LOG_ALL)
    #ns.core.LogComponentEnable("UanPhy", ns.core.LOG_ALL)
    #ns.core.LogComponentEnableAll(ns.core.LOG_PREFIX_NODE)
    #ns.core.LogComponentEnableAll(ns.core.LOG_PREFIX_TIME)
    #"Create an empty network and add nodes to it."
    net = Mininet(topo=None, build=False)
    # NOTE(review): the controller IP 10.0.0.4 equals h4's address below —
    # confirm this in-band controller placement is intentional.
    net.addController('c0', controller=RemoteController, ip='10.0.0.4', port=6633)
    h1 = net.addHost('h1', ip='10.0.0.1')
    h2 = net.addHost('h2', ip='10.0.0.2')
    #h3 = net.addHost( 'h3', ip='10.0.0.3' )
    h4 = net.addHost('h4', ip='10.0.0.4')
    # in-band OVS switches so control traffic can share the data network
    s1 = net.addSwitch('s1', cls=OVSSwitch, inband=True)
    s2 = net.addSwitch('s2', cls=OVSSwitch, inband=True)
    #s3 = net.addSwitch( 's3', cls=OVSSwitch, inband=True )
    net.addLink(h1, s1)
    net.addLink(h2, s2)
    #net.addLink( h3, s3 )
    # Underwater acoustic segment (FSK mode); h4, s1 and s2 are joined through
    # the UAN channel instead of wired links.
    uanLL = UanSegment(ns.uan.UanTxMode.FSK, 10000, 10000, 24000, 6000, 2,
                       "Default mode", 0, 0, 1)
    #uanLL.add(s0)
    uanLL.add(h4)
    uanLL.add(s1)
    uanLL.add(s2)
    #uanLL.add(s3)
    #Csma.add(s3)
    #net.addLink( h4, s1 )
    #net.addLink( h4, s2 )
    #net.addLink( h4, s3 )
    #net.addLink( s1, s2 )
    #net.addLink( s2, s3 )
    # One constant-position mobility helper per node (h3/s3 prepared even
    # though they are currently disabled above).
    mobility_helpers = {
        'h1': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        'h2': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        'h3': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        'h4': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        's1': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        's2': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel"),
        's3': opennet.createMobilityHelper("ns3::ConstantPositionMobilityModel")
    }
    # Fixed node coordinates; z=-10 places every node 10 m below the surface.
    list_position = {
        'h1': opennet.createListPositionAllocate(x1=0, y1=1, z1=-10),
        'h2': opennet.createListPositionAllocate(x1=0, y1=-1, z1=-10),
        'h3': opennet.createListPositionAllocate(x1=10, y1=10, z1=-10),
        'h4': opennet.createListPositionAllocate(x1=0, y1=0, z1=-10),
        's1': opennet.createListPositionAllocate(x1=0, y1=2, z1=-10),
        's2': opennet.createListPositionAllocate(x1=0, y1=-2, z1=-10),
        's3': opennet.createListPositionAllocate(x1=10, y1=10, z1=-10)
    }
    # Bind each position allocator to its mobility helper.
    mobility_models = {
        'h1': opennet.setListPositionAllocate(mobility_helpers['h1'], list_position['h1']),
        'h2': opennet.setListPositionAllocate(mobility_helpers['h2'], list_position['h2']),
        'h3': opennet.setListPositionAllocate(mobility_helpers['h3'], list_position['h3']),
        'h4': opennet.setListPositionAllocate(mobility_helpers['h4'], list_position['h4']),
        's1': opennet.setListPositionAllocate(mobility_helpers['s1'], list_position['s1']),
        's2': opennet.setListPositionAllocate(mobility_helpers['s2'], list_position['s2']),
        's3': opennet.setListPositionAllocate(mobility_helpers['s3'], list_position['s3'])
    }
    # Attach the mobility models to the live nodes (h3/s3 disabled).
    opennet.setMobilityModel(h1, mobility_models.get('h1'))
    opennet.setMobilityModel(h2, mobility_models.get('h2'))
    #opennet.setMobilityModel(h3, mobility_models.get('h3'))
    opennet.setMobilityModel(h4, mobility_models.get('h4'))
    opennet.setMobilityModel(s1, mobility_models.get('s1'))
    opennet.setMobilityModel(s2, mobility_models.get('s2'))
    #opennet.setMobilityModel(s3, mobility_models.get('s3'))
    # Start Mininet first, then the opennet/ns-3 side of the simulation.
    net.start()
    opennet.start()
    #s1.cmd('ifconfig s1 10.0.0.10')
    #s2.cmd('ifconfig s2 10.0.0.11')
    #s3.cmd('ifconfig s3 10.0.0.12')
    CLI(net)
    net.stop()
class Topo(object):
    """Mininet topology driver built from a `props` dict.

    `props` describes hosts, switches, links and controllers (typically
    parsed from a JSON/YAML file).  Besides building the Mininet topology,
    the constructor derives several lookup tables used by the flow/group
    inspection helpers:

    - portmap:              node -> peer -> local port number
    - openflowportmap:      "openflow:<dpid>:<port>" -> peer openflow name
    - host_connected_switch: host name -> attached switch name
    - switches_openflow_names: switch name -> "openflow:<decimal dpid>"
    """

    def __init__(self, props):
        """Build the MNTopo topology and all derived maps from `props`."""
        self.props = props
        self.controllers = []
        controllers = self.controllers
        self.hosts = {}
        hosts = self.hosts
        self.hosts_ip = {}
        hosts_ip = self.hosts_ip
        self.switches = {}
        switches = self.switches
        self.switches_openflow_names = {}
        switches_openflow_names = self.switches_openflow_names
        self.interfaces = {}
        interfaces = self.interfaces
        self.portmap = {}
        self.openflowportmap = {}
        self.host_connected_switch = {}
        # NOTE: attribute name keeps its historical typo ("swiches") for
        # backward compatibility with external consumers.
        self.number_of_swiches_links = 0
        self.number_of_switches = 0
        #switchClass = UserSwitch
        #switchClass = OVSSwitch
        # Userspace datapath is forced here.
        self.switchClass = partial(OVSSwitch, datapath='user')
        topo = MNTopo()
        self.topo = topo
        # --- hosts ---
        if 'host' not in props or props['host'] is None:
            props['host'] = []
        for host in props['host']:
            mac = None if 'mac' not in host else host['mac']
            print "adding host {}".format(host['name'])
            hosts[host['name']] = topo.addHost(host['name'], ip=host['ip'], defaultRoute='via ' + host['gw'], mac=mac)
            # Strip the prefix length: '10.0.0.1/24' -> '10.0.0.1'
            hosts_ip[host['name']] = host['ip'].split('/')[0]
        # --- switches ---
        if 'switch' not in props or props['switch'] is None:
            props['switch'] = []
        self.number_of_switches = len(props['switch'])
        for switch in props['switch']:
            name = switch['name']
            if 'type' not in switch:
                switch['type'] = 'ovs'
            switches[name] = switch
            if switch['type'] == 'ovs':
                print "adding switch {}".format(name)
                # For OVS switches the dict entry is replaced by the topo
                # node name returned by addSwitch.
                switches[name] = topo.addSwitch(name, dpid=switch['dpid'], protocols=switch['protocols'])
            else:
                print "switch {} is not OVS".format(name)
            # "openflow:<dpid as decimal>" -- the name ODL-style APIs use.
            switches_openflow_names[name] = "openflow:" + str(
                int(switch['dpid'], 16))
        if 'link' not in props or props['link'] is None:
            props['link'] = []
        # create mininet connections
        for link in props['link']:
            src_name = link['source']
            dst_name = link['destination']
            source = None
            if src_name in switches:
                source = switches[src_name]
            else:
                source = hosts[src_name]
            destination = None
            if dst_name in switches:
                destination = switches[dst_name]
            else:
                destination = hosts[dst_name]
            # NOTE(review): for OVS switches `source`/`destination` are topo
            # node-name strings (see above), so `'type' not in source` is a
            # substring test, not a dict-key test -- it happens to be True
            # for typical names, but verify this is the intended check.
            if ('type' not in source or source['type'] == 'ovs') and (
                    'type' not in destination or destination['type'] == 'ovs'):
                print "adding link from {} to {}".format(source, destination)
                topo.addLink(source, destination)
            else:
                print "link from {} to {} does not connect two OVS switches".format(
                    source, destination)
            if src_name in switches and dst_name in switches:
                # Counted once per direction, hence +2 per physical link.
                self.number_of_swiches_links = self.number_of_swiches_links + 2
        # save port mapping
        # Port numbers are assigned in link-declaration order, starting at 1
        # per node -- this mirrors Mininet's own port assignment.
        ports = {}
        for link in props['link']:
            src = link['source']
            if src not in ports:
                ports[src] = 1
            src_port = ports[src]
            ports[src] = ports[src] + 1
            dst = link['destination']
            if dst not in ports:
                ports[dst] = 1
            dst_port = ports[dst]
            ports[dst] = ports[dst] + 1
            if src not in self.portmap:
                self.portmap[src] = {}
            self.portmap[src][dst] = src_port
            if src in self.switches and dst in self.switches:
                self.openflowportmap[
                    self.switches_openflow_names[src] + ':' + str(src_port)] = self.switches_openflow_names[dst]
            if dst not in self.portmap:
                self.portmap[dst] = {}
            self.portmap[dst][src] = dst_port
            if dst in self.switches and src in self.switches:
                self.openflowportmap[
                    self.switches_openflow_names[dst] + ':' + str(dst_port)] = self.switches_openflow_names[src]
            # skip connections between hosts
            if src in self.hosts and dst in self.hosts:
                continue
            # save the connected switch by host
            if (src in self.hosts and dst in self.switches):
                self.host_connected_switch[src] = dst
            elif (dst in self.hosts and src in self.switches):
                self.host_connected_switch[dst] = src
        # --- controllers (default: local c0) ---
        if 'controller' not in props or props['controller'] is None:
            props['controller'] = [{'name': 'c0', 'ip': '127.0.0.1'}]
        for controller in props['controller']:
            controllers.append(
                RemoteController(controller['name'], ip=controller['ip']))

    def start(self):
        """Clean stale state, build the Mininet net and start it."""
        cleanup()
        self.net = Mininet(topo=self.topo, switch=self.switchClass,
                           controller=self.controllers[0])
        # if there are multiple controllers, let's append the rest of the controllers
        itercrtls = iter(self.controllers)
        next(itercrtls)
        for ctrl in itercrtls:
            self.net.addController(ctrl)
        # Attach pre-existing OS interfaces to switches, if requested.
        if 'interface' not in self.props or self.props['interface'] is None:
            self.props['interface'] = []
        for interface in self.props['interface']:
            name = interface['name']
            # NOTE(review): the format string has one placeholder but two
            # arguments -- the switch name is silently dropped from the
            # message; probably meant "adding interface {} to switch {}".
            print "adding interface {} to switch".format(
                name, interface['switch'])
            self.interfaces[name] = Intf(
                name, node=self.net.nameToNode[interface['switch']])
        self.net.start()

    def cli(self):
        """Open the interactive Mininet CLI on the running network."""
        CLI(self.net)

    def stop(self):
        """Stop the network and clean residual Mininet state."""
        self.net.stop()
        cleanup()

    def get_nodes_flows_groups(self, prefix=None):
        """Collect flow cookies and group ids from every live bridge.

        Returns {openflow_name: {'cookies': [...], 'groups': [...],
        'bscids': {...}}}.  When `prefix` is given, cookies whose top byte
        equals it are additionally decoded: bits 55-32 are the bscid and
        bits 31-24 a version number.
        """
        nodes = {}
        for name in self.switches_openflow_names:
            if not exists_bridge(name):
                continue
            oname = self.switches_openflow_names[name]
            nodes[oname] = {'cookies': [], 'groups': [], 'bscids': {}}
            output = subprocess.check_output(
                "sudo ovs-ofctl dump-groups {} --protocol=Openflow13".format(
                    name), shell=True)
            pattern = r'group_id=(\d+)'
            regex = re.compile(pattern, re.IGNORECASE)
            for match in regex.finditer(output):
                nodes[oname]['groups'].append(int(match.group(1)))
            output = subprocess.check_output(
                "sudo ovs-ofctl dump-flows {} --protocol=Openflow13".format(
                    name), shell=True)
            # Only hex-formatted cookies ("0x...") are matched.
            pattern = r'cookie=(0[xX][0-9a-fA-F]+)'
            regex = re.compile(pattern, re.IGNORECASE)
            for match in regex.finditer(output):
                number = int(match.group(1), 16)
                nodes[oname]['cookies'].append(number)
                if prefix is None:
                    continue
                if number >> 56 == prefix:
                    bscid = (number & 0x00FFFFFF00000000) >> 32
                    nodes[oname]['bscids'][bscid] = {
                        'cookie': number,
                        'version': (number & 0x00000000FF000000) >> 24
                    }
        return nodes

    def containsSwitch(self, name):
        """True if `name` is a known switch name or its openflow name."""
        return str(name) in self.switches_openflow_names or str(
            name) in self.switches_openflow_names.values()

    def get_nodes_flows_groups_stats(self, prefix=None):
        """Collect per-flow and per-group packet/byte counters.

        Returns {switch_name: {'flows': {cookie_str: {...}},
        'groups': {group_id_str: {...}}}}.  With `prefix`, only flows
        whose cookie's top byte matches are kept.
        """
        nodes = {}
        for name in self.switches_openflow_names:
            if not exists_bridge(name):
                continue
            nodes[name] = {'flows': {}, 'groups': {}}
            output = subprocess.check_output(
                "sudo ovs-ofctl dump-group-stats {} --protocol=Openflow13".
                format(name), shell=True)
            # NOTE(review): `pattern` is assigned but never used here; the
            # regexes below are compiled from their own literals.
            pattern = r'group_id=(\d+)'
            regex = re.compile(r'(group_id=.*)', re.IGNORECASE)
            regexvalues = re.compile(
                r'group_id=(\d+),duration=[\d]*.[\d]*s,ref_count=[\d]*,packet_count=(\d+),byte_count=(\d+)',
                re.IGNORECASE)
            for linematch in regex.finditer(output):
                line = linematch.group(1)
                for match in regexvalues.finditer(line):
                    nodes[name]['groups'][match.group(1)] = {
                        'packets': match.group(2),
                        'bytes': match.group(3)
                    }
            output = subprocess.check_output(
                "sudo ovs-ofctl dump-flows {} --protocol=Openflow13".format(
                    name), shell=True)
            regex = re.compile(r'(cookie=.*)', re.IGNORECASE)
            regexvalues = re.compile(
                r'cookie=(0[xX][0-9a-fA-F]+),.*n_packets=(\d+),.*n_bytes=(\d+)',
                re.IGNORECASE)
            for linematch in regex.finditer(output):
                line = linematch.group(1)
                for match in regexvalues.finditer(line):
                    number = int(match.group(1), 16)
                    if prefix is None or number >> 56 == prefix:
                        # `bscid` is computed but not stored here (unlike
                        # get_nodes_flows_groups) -- flows are keyed by the
                        # full cookie value instead.
                        bscid = (number & 0x00FFFFFF00000000) >> 32
                        nodes[name]['flows'][str(number)] = {
                            'packets': match.group(2),
                            'bytes': match.group(3)
                        }
        return nodes
def bufferbloat(): if not os.path.exists(args.dir): os.makedirs(args.dir) os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong) topo = BBTopo() net = Mininet(topo=topo, host=CPULimitedHost, link=AQMLink) net.start() # Hint: The command below invokes a CLI which you can use to # debug. It allows you to run arbitrary commands inside your # emulated hosts h1 and h2. # Note: It can take a while to pop out xterm windows in GCP. # CLI(net) # This dumps the topology and how nodes are interconnected through # links. dumpNodeConnections(net.hosts) # This performs a basic all pairs ping test. net.pingAll() # Start all the monitoring processes start_tcpprobe("%s/cwnd.txt" % (args.dir)) # TODO: Start monitoring the queue sizes. Since the switch I # created is "s0", I monitor one of the interfaces. Which # interface? The interface numbering starts with 1 and increases. # Depending on the order you add links to your network, this # number may be 1 or 2. Ensure you use the correct number. qmon = start_qmon("s0-eth2") # TODO: Start iperf, webservers, ping. start_iperf(net) start_webserver(net) start_ping(net) # TODO: measure the time it takes to complete webpage transfer # from h1 to h2 (say) 3 times. Hint: check what the following # command does: curl -o /dev/null -s -w %{time_total} google.com # Now use the curl command to fetch webpage from the webserver you # spawned on host h1 (not from google!) # Hint: Where is the webserver located? h1, h2 = net.getNodeByName('h1', 'h2') h1.popen("curl -o /dev/null -s -w %{time_total} %s", h2.IP(), shell=True) # Hint: have a separate function to do this and you may find the # loop below useful. start_time = time() while True: # do the measurement (say) 3 times. sleep(5) sleep(5) now = time() delta = now - start_time if delta > args.time: break print "%.1fs left..." % (args.time - delta) # TODO: compute average (and standard deviation) of the fetch # times. You don't need to plot them. 
Just note it in your # README and explain. stop_tcpprobe() qmon.terminate() net.stop() # Ensure that all processes you create within Mininet are killed. # Sometimes they require manual killing. Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
def sdn1net():
    """Bring up the SDN-IP style test network defined by SDNTopo.

    speaker1 peers (BGP, via Quagga) with three external ASes
    (peer64514/5/6, each fronting one host), all switches are pointed at
    an ONOS controller, and an ssh port-forward from the root namespace
    exposes the speaker to the controller machine.  Depends on module
    globals `numSw` and `onos1IP` and on the quagga/sshd helper functions.
    """
    topo = SDNTopo()
    info('*** Creating network\n')
    # time.sleep( 30 )
    net = Mininet(topo=topo, controller=RemoteController)
    # NOTE(review): `speaker2` is fetched but never used below.
    speaker1, speaker2, peer64514, peer64515, peer64516 = \
        net.get( 'speaker1', 'speaker2' , 'peer64514', 'peer64515', 'peer64516' )
    # Adding addresses to host64513_1 interface connected to sw24
    # for BGP peering: one /24 per external peer on the same interface.
    speaker1.setMAC('00:00:00:00:00:01', 'speaker1-eth0')
    speaker1.cmd('ip addr add 10.0.4.101/24 dev speaker1-eth0')
    speaker1.cmd('ip addr add 10.0.5.101/24 dev speaker1-eth0')
    speaker1.cmd('ip addr add 10.0.6.101/24 dev speaker1-eth0')
    speaker1.defaultIntf().setIP('10.1.4.101/24')
    speaker1.defaultIntf().setMAC('00:00:00:00:00:01')
    # Net has to be start after adding the above link
    net.start()
    # setup configuration on the interface connected to switch
    peer64514.cmd("ifconfig peer64514-eth0 10.0.4.1 up")
    peer64514.setMAC('00:00:00:00:00:04', 'peer64514-eth0')
    peer64515.cmd("ifconfig peer64515-eth0 10.0.5.1 up")
    peer64515.setMAC('00:00:00:00:00:05', 'peer64515-eth0')
    peer64516.cmd("ifconfig peer64516-eth0 10.0.6.1 up")
    peer64516.setMAC('00:00:00:00:00:06', 'peer64516-eth0')
    # setup configuration on the interface connected to hosts
    peer64514.setIP("4.0.0.254", 8, "peer64514-eth1")
    peer64514.setMAC('00:00:00:00:00:44', 'peer64514-eth1')
    peer64515.setIP("5.0.0.254", 8, "peer64515-eth1")
    peer64515.setMAC('00:00:00:00:00:55', 'peer64515-eth1')
    peer64516.setIP("6.0.0.254", 8, "peer64516-eth1")
    peer64516.setMAC('00:00:00:00:00:66', 'peer64516-eth1')
    # enable forwarding on BGP peer hosts (they route for their AS)
    peer64514.cmd('sysctl net.ipv4.conf.all.forwarding=1')
    peer64515.cmd('sysctl net.ipv4.conf.all.forwarding=1')
    peer64516.cmd('sysctl net.ipv4.conf.all.forwarding=1')
    # config interface for control plane connectivity
    peer64514.setIP("192.168.0.4", 24, "peer64514-eth2")
    peer64515.setIP("192.168.0.5", 24, "peer64515-eth2")
    peer64516.setIP("192.168.0.6", 24, "peer64516-eth2")
    # Setup hosts in each non-SDN AS
    host64514, host64515, host64516 = \
        net.get( 'host64514', 'host64515', 'host64516' )
    host64514.cmd('ifconfig host64514-eth0 4.0.0.1 up')
    host64514.cmd('ip route add default via 4.0.0.254')
    host64514.setIP('192.168.0.44', 24, 'host64514-eth1')  # for control plane
    host64515.cmd('ifconfig host64515-eth0 5.0.0.1 up')
    host64515.cmd('ip route add default via 5.0.0.254')
    host64516.cmd('ifconfig host64516-eth0 6.0.0.1 up')
    host64516.cmd('ip route add default via 6.0.0.254')
    # set up swCtl100 as a learning switch (no controller, standalone mode)
    swCtl100 = net.get('swCtl100')
    swCtl100.cmd('ovs-vsctl set-controller swCtl100 none')
    swCtl100.cmd('ovs-vsctl set-fail-mode swCtl100 standalone')
    # connect all switches to controller
    for i in range(1, numSw + 1):
        swX = net.get('sw%s' % (i))
        swX.cmd('ovs-vsctl set-controller sw%s tcp:%s:6633' % (i, onos1IP))
    # Start Quagga on border routers
    '''
    for i in range ( 64514, 64516 + 1 ):
        startquagga( 'peer%s' % ( i ), i, 'quagga%s.conf' % ( i ) )
    '''
    startquagga(peer64514, 64514, 'quagga64514.conf')
    startquagga(peer64515, 64515, 'quagga64515.conf')
    startquagga(peer64516, 64516, 'quagga64516.conf')
    # start Quagga in SDN network
    startquagga(speaker1, 64513, 'quagga-sdn.conf')
    # Root namespace: control-plane access + link to the speaker.
    root = net.get('root')
    root.intf('root-eth0').setIP('1.1.1.2/24')
    root.cmd('ip addr add 192.168.0.100/24 dev root-eth0')
    speaker1.intf('speaker1-eth1').setIP('1.1.1.1/24')
    stopsshd()
    hosts = [peer64514, peer64515, peer64516, host64514]
    startsshds(hosts)
    # ssh tunnel "<local>:2000:<onos>:2000" from root to the controller.
    forwarding1 = '%s:2000:%s:2000' % ('1.1.1.2', onos1IP)
    root.cmd('ssh -nNT -o "PasswordAuthentication no" -o "StrictHostKeyChecking no" -l sdn -L %s %s & ' % (forwarding1, onos1IP))
    # time.sleep( 3000000000 )
    CLI(net)
    stopsshd()
    stopquagga()
    net.stop()
def topology():
    """Create a random core/edge network and drive it with Poisson traffic.

    18 switches total: the first 12 form a randomly-meshed core (each core
    pair linked with probability 0.5), the remaining 6 are edge switches,
    each attached to two core switches and to `n` hosts.  After startup,
    iperf flows are launched between random host pairs with exponential
    inter-arrival times (rate `lamda`) and exponential durations (rate
    `mu`) for `experimentDuration` seconds.
    """
    net = Mininet(controller=Controller, link=TCLink, switch=OVSKernelSwitch)
    c1 = net.addController('c1', controller=RemoteController,
                           ip='192.168.56.1', port=6633)
    c1.start()
    info('*** Adding switches\n')
    n = 10  #number of hosts per edge node
    lamda = 0.1  #flow arrival rate
    sw = 18  #total number of switches
    experimentDuration = 500
    for s in range(sw):
        switch = net.addSwitch('s%s' % (s + 1))
        switch.start([c1])
    switches = net.switches
    info('*** Creating links and adding hosts to edge switches\n')
    j = 0
    noCore = 12  #number of core switches
    CORE_SWITCHES = []  #the set of core switches (appended once per random link end)
    for s in switches:
        j = j + 1
        i = 0
        for ss in switches:
            i = i + 1
            if i > j and i <= noCore and j <= noCore:  #12 core switches
                # Link each unordered core pair with probability 0.5.
                if random.random() > 0.5:
                    net.addLink(s, ss, bw=10)
                    CORE_SWITCHES.append(s)
                    CORE_SWITCHES.append(ss)
            if j > noCore:  # 6 edge switches
                # NOTE(review): if the random meshing produced fewer than
                # j-10 entries, these indexes raise IndexError -- there is
                # no guard for an unlucky random draw.
                net.addLink(s, CORE_SWITCHES[j - 11], bw=10)
                net.addLink(s, CORE_SWITCHES[j - 12], bw=10)
                info('*** Adding hosts to switch\n')
                for h in range(n):
                    # Host names are offset by j*sw so they stay unique
                    # across edge switches.
                    host = net.addHost('h%s' % (h + j * sw))
                    net.addLink(host, s)
                break
    net.start()
    # --- Poisson traffic generator ---
    flowCounter = 0
    mu = 0.05  # average flow duration 1/mu = 20 s
    flowStartAfter = random.expovariate(lamda)
    flowStartTime = flowStartAfter
    print(flowStartAfter)
    while (experimentDuration > flowStartTime):
        # Wait for the next arrival, then start one iperf flow between a
        # random host pair for an exponentially-distributed duration.
        time.sleep(flowStartAfter)
        hostPair = random.sample(net.hosts, 2)
        src, dst = hostPair  # a tuple of Mininet host objects
        flowDuration = random.expovariate(mu)
        print(flowDuration)
        x = 'c' + str(flowDuration)
        #src.cmd( 'ping -%s'%x, dst.IP(), '1> /tmp/h1.out 2>/tmp/h1.err &' )
        src.cmd('iperf -s &')
        dst.cmd('iperf -c', src.IP(), '-t %s &' % flowDuration)
        flowStartAfter = random.expovariate(lamda)
        flowStartTime = flowStartTime + flowStartAfter
        print(flowStartAfter)
        print(flowStartTime)
        flowCounter = flowCounter + 1
        print(flowCounter)
    CLI(net)
def simpleTest():
    """Bring up the I2NSF test network and configure every NSF node.

    Builds a 4-host single-switch topology plus named service nodes
    (firewalls, web/mail filters, VPN, NAT, security controller), starts
    a suricata instance per filtering NSF with its own rule file and
    NFQUEUE hook, configures host routing and NAT to the outside world,
    then drops into the CLI.  Requires root, suricata, and the project's
    helper scripts (deleteFlowForSwitchs.sh, vpn.sh, Makefiles).
    """
    #os.system("sudo mysql -u root -p mysql < ./schema.sql")
    #"Create and test a simple network"
    topo = SingleSwitchTopo(n=4)
    net = Mininet(topo, controller=partial(RemoteController, ip='127.0.0.1', port=6633))
    net.start()
    os.system("sudo ./deleteFlowForSwitchs.sh")
    # os.system("ovs-vsctl set Bridge s1 protocols=OpenFlow13");
    # os.system("ovs-vsctl set Bridge s2 protocols=OpenFlow13");
    # os.system("ovs-vsctl set Bridge s3 protocols=OpenFlow13");
    # os.system("ovs-vsctl set Bridge s4 protocols=OpenFlow13");
    net.pingAll()
    os.system("sudo ./vpn.sh PUT 127.0.0.1:8181")
    # Drop any stale I2NSF rule files so the NSFs start clean.
    os.system("sudo rm /etc/suricata/rules/i2nsf-firewall.rules")
    os.system("sudo rm /etc/suricata/rules/i2nsf-time-firewall.rules")
    os.system("sudo rm /etc/suricata/rules/i2nsf-web.rules")
    os.system("sudo rm /etc/suricata/rules/i2nsf-mail.rules")
    # Initialize components
    smes = net.get('smes')
    m_user = net.get('m_user')
    firewall = net.get('firewall')
    firewall_2 = net.get('firewall_2')
    #test = net.get('test');
    sp = net.get('sp')
    web_filter = net.get('web_filter')
    mail = net.get('mail')
    nat = net.get('nat')
    vpn = net.get('vpn')
    # Build and launch each NSF's agent ('secu' is presumably a shell
    # alias/helper available inside the node -- TODO confirm).
    firewall.cmd('cd ../NSF/Firewall_based_time; sudo make clean')
    firewall.cmd('secu')
    firewall.cmd('sudo make all start >> /tmp/time_firewall.out &')
    firewall_2.cmd('cd ../NSF/Firewall; sudo make clean')
    firewall_2.cmd('secu')
    firewall_2.cmd('sudo make all start >> /tmp/firewall.out &')
    smes.cmd('sudo service sendmail restart &')
    web_filter.cmd('cd ../NSF/Web_Filter; sudo make clean')
    web_filter.cmd('secu')
    web_filter.cmd('sudo make all start >> /tmp/web_filter.out &')
    mail.cmd('cd ../NSF/Mail_Filter; sudo make clean')
    mail.cmd('secu')
    mail.cmd('sudo make all start >> /tmp/mail.out &')
    # Security controller node: web UI, developer mgmt system, jetconf.
    sp.cmd('cd ~/Hackathon/Hackathon-102/FullVersion/SecurityController')
    sp.cmd('sudo service apache2 stop >> /tmp/webserver.out')
    sp.cmd('sudo service apache2 start >> /tmp/webserver.out')
    sp.cmd('cd ../Developer-mgmt-system/; sudo make clean')
    sp.cmd('secu')
    sp.cmd('sudo make all start >> /tmp/webserver.out &')
    sp.cmd('cd ~/Hackathon/Hackathon-102/FullVersion/SecurityController')
    sp.cmd('sudo python server.py >> /tmp/webserver.out &')
    sp.cmd('cd /works/jetconf')
    sp.cmd(
        'sudo python3.6 run.py -c example-config.yaml >> /tmp/webserver.out &')
    sp.cmd('sudo make clean')
    sp.cmd('sudo make all start >> /tmp/SecurityController.out &')
    # Default routes steer each node's traffic through its NSF chain.
    smes.cmd('sudo route add default gw', '10.0.0.201')
    #test.cmd( 'sudo route add default gw', '10.0.0.201')
    m_user.cmd('sudo route add default gw', '10.0.0.201')
    m_user.cmd('sudo sysctl net.ipv4.ip_forward=1')
    vpn.cmd('sudo route add default gw', '10.0.0.200')
    firewall.cmd('sudo route add default gw', '10.0.0.203')
    web_filter.cmd('sudo route add default gw', '10.0.0.150')
    firewall_2.cmd('sudo route add default gw', '10.0.0.202')
    mail.cmd('sudo route add default gw', '10.0.0.150')
    # Disable ICMP redirects on forwarding nodes so traffic keeps flowing
    # through them instead of being short-circuited.
    smes.cmd('sudo sysctl net.ipv4.conf.all.send_redirects=0')
    smes.cmd('sudo sysctl net.ipv4.ip_forward=1')
    vpn.cmd('sudo sysctl net.ipv4.conf.all.send_redirects=0')
    vpn.cmd('sudo sysctl net.ipv4.ip_forward=1')
    # Time-based firewall: forward everything into NFQUEUE for suricata.
    firewall.cmd('sudo sysctl net.ipv4.ip_forward=1')
    firewall.cmd('sudo iptables -I FORWARD -j NFQUEUE')
    firewall.cmd(
        'sudo rm /var/run/suricata-time-firewall.pid >> /tmp/time_firewall.out'
    )
    firewall.cmd('sudo rm /var/run/suricata/time_firewall.socket')
    firewall.cmd(
        'sudo /usr/bin/suricata -D --pidfile /var/run/suricata-time-firewall.pid -c /etc/suricata/suricata_firewall_based_time.yaml -q 0 >> /tmp/time_firewall.out'
    )
    firewall.cmd(
        'sudo /usr/bin/suricatasc -c reload-rules & >> /tmp/time_firewall.out')
    # Plain firewall NSF.
    firewall_2.cmd('sudo sysctl net.ipv4.ip_forward=1')
    firewall_2.cmd('sudo iptables -I FORWARD -j NFQUEUE')
    firewall_2.cmd(
        'sudo rm /var/run/suricata-firewall.pid >> /tmp/firewall.out')
    firewall_2.cmd('sudo rm /var/run/suricata/firewall.socket')
    firewall_2.cmd(
        'sudo /usr/bin/suricata -D --pidfile /var/run/suricata-firewall.pid -c /etc/suricata/suricata_firewall.yaml -q 0 >> /tmp/firewall.out'
    )
    firewall_2.cmd(
        'sudo /usr/bin/suricatasc -c reload-rules & >> /tmp/firewall.out')
    # Web-filter NSF.
    web_filter.cmd('sudo sysctl net.ipv4.ip_forward=1')
    web_filter.cmd('sudo iptables -I FORWARD -j NFQUEUE')
    web_filter.cmd('sudo rm /var/run/suricata-web.pid >> /tmp/webfilter.out')
    web_filter.cmd('sudo rm /var/run/suricata/web.socket')
    web_filter.cmd(
        'sudo /usr/bin/suricata -D --pidfile /var/run/suricata-web.pid -c /etc/suricata/suricata_web.yaml -q 0 >> /tmp/web_filter.out'
    )
    web_filter.cmd(
        'sudo /usr/bin/suricatasc -c reload-rules & >> /tmp/web_filter.out')
    # Mail-filter NSF.
    mail.cmd('sudo sysctl net.ipv4.ip_forward=1')
    mail.cmd('sudo iptables -I FORWARD -j NFQUEUE')
    mail.cmd('sudo rm /var/run/suricata-mail.pid >> /tmp/mail.out')
    mail.cmd('sudo rm /var/run/suricata/mail.socket')
    mail.cmd(
        'sudo /usr/bin/suricata -D --pidfile /var/run/suricata-mail.pid -c /etc/suricata/suricata_mail.yaml -q 0 >> /tmp/mail.out'
    )
    mail.cmd('sudo /usr/bin/suricatasc -c reload-rules & >> /tmp/mail.out')
    # Identify the interface connecting to the mininet network
    localIntf = nat.defaultIntf()
    fixNetworkManager(nat, 'nat-eth0')
    # Flush any currently active rules
    nat.cmd('sudo iptables -F')
    nat.cmd('sudo iptables -t nat -F')
    # Create default entries for unmatched traffic
    nat.cmd('sudo iptables -P INPUT ACCEPT')
    nat.cmd('sudo iptables -P OUTPUT ACCEPT')
    nat.cmd('sudo iptables -P FORWARD DROP')
    # Configure NAT
    nat.cmd('sudo iptables -I FORWARD -i', localIntf, '-d', '10.0/8',
            '-j DROP')
    nat.cmd('sudo iptables -A FORWARD -i', localIntf, '-s', '10.0/8',
            '-j ACCEPT')
    nat.cmd('sudo iptables -A FORWARD -i', 'eth0', '-d', '10.0/8',
            '-j ACCEPT')
    nat.cmd('sudo iptables -t nat -A POSTROUTING -o ', 'eth0',
            '-j MASQUERADE')
    # Instruct the kernel to perform forwarding
    nat.cmd('sudo sysctl net.ipv4.ip_forward=1')
    CLI(net)
    # --- teardown after the CLI exits ---
    os.system("sudo killall -9 /usr/bin/suricata")
    """Stop NAT/forwarding between Mininet and external network"""
    # Flush any currently active rules
    nat.cmd('sudo iptables -F')
    nat.cmd('sudo iptables -t nat -F')
    # Instruct the kernel to stop forwarding
    nat.cmd('sudo sysctl net.ipv4.ip_forward=0')
    os.system("sudo ./vpn.sh DELETE 127.0.0.1:8181")
    net.stop()
class Dispatcher(Bottle):
    """REST facade over a Mininet + Ryu IoT-switch emulation.

    Exposes Bottle HTTP endpoints to start/stop the emulated network,
    edit the topology while the network is stopped, run shell commands on
    nodes, and query event/charge tables kept in a MySQL database.
    `is_net_started` gates every endpoint that depends on net state;
    `topo_handler` owns the editable topology description; `connection`/
    `cursor` hold the (autocommit) MySQL session.
    """

    def __init__(self):
        """Connect to MySQL, start the network once, and register routes."""
        super(Dispatcher, self).__init__()
        config = {
            'user': '******',
            'password': '******',
            'host': 'db',
            'port': '3306',
            'database': 'emulator'
        }
        self.connection = mysql.connector.connect(**config)
        self.connection.autocommit = True
        self.cursor = self.connection.cursor()
        self.topo_handler = TopoHandler()
        # Ryu launch line, later run inside the c0 controller node.
        self.ryu_cmd = "ryu-manager --observe-links --wsapi-host %s --wsapi-port %s ryu.app.iot_switch &" % (
            CONTROLLER_HOST, CONTROLLER_PORT)
        self.is_net_started = False
        self.initial_charge_level = 10000
        self.start_net()
        # --- route table ---
        self.route('/nodes/<node_name>', method='POST', callback=self.post_node)
        self.route('/switch/<switch_name>', method='POST', callback=self.add_switch)
        self.route('/switch/<switch_name>', method='DELETE', callback=self.del_switch)
        self.route('/host/<host_name>', method='POST', callback=self.add_host)
        self.route('/host/<host_name>', method='DELETE', callback=self.del_host)
        self.route('/link', method='POST', callback=self.add_link)
        self.route('/link', method='DELETE', callback=self.del_link)
        self.route('/test', method='GET', callback=self.test)
        self.route('/nodes/<node_name>/cmd', method='POST', callback=self.do_cmd)
        self.route('/events/<dpid>', method='GET', callback=self.get_events_page)
        self.route('/events/<dpid>/total', method='GET', callback=self.get_events_total)
        self.route('/events/charge_state', method='GET', callback=self.get_charge_state)
        self.route('/count/events', method='GET', callback=self.get_events_count)
        self.route('/events/<dpid>/charge_events', method='GET', callback=self.get_charge_events)
        self.route('/events/<dpid>/charge_events/total', method='GET', callback=self.get_charge_total)
        self.route('/initial_charge', method='GET', callback=self.get_initial_charge_level)
        self.route('/initial_charge', method='PUT', callback=self.set_initial_charge_level)
        self.route('/', method='OPTIONS', callback=self.options_handler)
        self.route('/<path:path>', method='OPTIONS', callback=self.options_handler)
        self.route('/net/start', method='GET', callback=self.start_net)
        self.route('/net/stop', method='GET', callback=self.stop_net)
        self.route('/net/status', method='GET', callback=self.net_status)
        self.route('/net/topo', method='GET', callback=self.get_topology)

    def options_handler(self, path=None):
        """Answer CORS preflight requests with an empty 200."""
        return

    def get_req(self, req):
        """GET `req` and return its parsed JSON, retrying until it works.

        NOTE(review): the bare except swallows every error (connection,
        JSON, keyboard interrupt) and retries forever each 5 s -- confirm
        this is the intended startup-wait behaviour.
        """
        while True:
            try:
                print "trying to get ", req
                l = requests.get(req).json()
                break
            except:
                time.sleep(5)
        return l

    def start_net(self):
        """(Re)create and start the Mininet network (403 if running).

        Rewrites the `topology` table, launches Ryu inside c0, refreshes
        the mac->dpid map, and seeds `charge_state` for every switch
        reported by the Ryu topology REST API.
        """
        if self.is_net_started:
            response.status = 403
        else:
            self.cursor.execute('DELETE FROM topology')
            for node_a, node_b in self.topo_handler.get_links():
                self.cursor.execute(
                    "REPLACE INTO topology (node_a, node_b) VALUES (%s, %s)",
                    (node_a, node_b))
            self.connection.commit()
            self.net = Mininet(MyTopo(self.topo_handler),
                               switch=OVSSwitch,
                               controller=RemoteController('c0',
                                                           ip='127.0.0.1',
                                                           port=6653))
            self.net.start()
            self.net['c0'].cmd(self.ryu_cmd)
            self.update_mac_to_dpid()
            self.net.pingAll()
            ts = time.time()
            timestamp = datetime.datetime.fromtimestamp(ts).strftime(
                '%Y-%m-%d %H:%M:%S')
            l = self.get_req('http://localhost:5555/v1.0/topology/switches')
            for el in l:
                self.cursor.execute(
                    "REPLACE INTO charge_state (dpid, charge, ts) VALUES (%s, %s, %s)",
                    (el['dpid'], self.initial_charge_level, timestamp))
            self.connection.commit()
            self.is_net_started = True
            print "*** Network has started ***"

    def stop_net(self):
        """Stop the network and kill the controller (403 if not running)."""
        if not self.is_net_started:
            response.status = 403
        else:
            self.net.stop()
            os.system('fuser -k 6653/tcp')  # kill mininet controller
            self.is_net_started = False

    def net_status(self):
        """Return {"status": bool} -- whether the network is running."""
        return {"status": self.is_net_started}

    def update_mac_to_dpid(self):
        """Rebuild the mac_to_dpid table from Ryu's switch list + hosts."""
        self.cursor.execute("DELETE FROM mac_to_dpid")
        l = self.get_req('http://localhost:5555/v1.0/topology/switches')
        for el in l:
            for mac in el['ports']:
                self.cursor.execute(
                    "REPLACE INTO mac_to_dpid (mac_addr, dpid) VALUES (%s, %s)",
                    (mac['hw_addr'], el['dpid']))
        for el in self.topo_handler.get_hosts():
            self.cursor.execute(
                "REPLACE INTO mac_to_dpid (mac_addr, dpid) VALUES (%s, %s)",
                (self.topo_handler.get_host_mac(el), el))
        self.connection.commit()

    def test(self):
        """Liveness probe endpoint."""
        return "TEST!"

    def post_node(self, node_name):
        """Merge request JSON 'params' into a running node's params."""
        if not self.is_net_started:
            response.status = 403
        else:
            node = self.net[node_name]
            node.params.update(request.json['params'])

    def add_switch(self, switch_name):
        """Add a switch to the editable topology (only while stopped)."""
        if switch_name not in self.topo_handler.get_switches(
        ) and not self.is_net_started:
            # NOTE(review): `c0` is fetched but never used here.
            c0 = self.net.get('c0')
            self.topo_handler.add_switch(switch_name)
        else:
            response.status = 403

    def add_host(self, host_name):
        """Add a host to the editable topology (only while stopped)."""
        # NOTE(review): the uniqueness check is against get_switches(),
        # not get_hosts() -- confirm a host may not share a switch name.
        if host_name not in self.topo_handler.get_switches(
        ) and not self.is_net_started:
            self.topo_handler.add_host(host_name)
        else:
            response.status = 403

    def del_switch(self, switch_name):
        """Remove a switch from the topology (only while stopped)."""
        if switch_name in self.topo_handler.get_switches(
        ) and not self.is_net_started:
            self.topo_handler.delete_switch(switch_name)
        else:
            response.status = 403

    def del_host(self, host_name):
        """Remove a host from the topology (only while stopped)."""
        if host_name in self.topo_handler.get_hosts(
        ) and not self.is_net_started:
            self.topo_handler.delete_host(host_name)
        else:
            response.status = 403

    def del_link(self):
        """Delete the link between request JSON nodes 'a' and 'b'."""
        a = request.json['a']
        b = request.json['b']
        # True when NO link exists in either direction.
        has_link = ((a, b) not in self.topo_handler.get_links()
                    and (b, a) not in self.topo_handler.get_links())
        if has_link or self.is_net_started:
            response.status = 403
        else:
            # NOTE(review): this branch only runs while the net is stopped,
            # yet it touches self.net (configLinkStatus/start) -- verify
            # self.net is still valid here.
            self.net.configLinkStatus(a, b, 'down')
            if (a, b) in self.topo_handler.get_links():
                self.topo_handler.delete_link((a, b))
            else:
                self.topo_handler.delete_link((b, a))
            self.net.start()

    def is_node(self, name):
        """True if `name` is a known switch or host."""
        return name in self.topo_handler.get_switches(
        ) or name in self.topo_handler.get_hosts()

    def add_link(self):
        """Add a link between request JSON nodes 'a' and 'b' (stopped only)."""
        a = request.json['a']
        b = request.json['b']
        nodes = self.is_node(a) and self.is_node(b)
        if not nodes or self.is_net_started:
            response.status = 403
        else:
            self.topo_handler.add_link((a, b))

    def get_topology(self):
        """Return the current editable topology as JSON-able dict."""
        return {
            "hosts": list(self.topo_handler.get_hosts()),
            "switches": list(self.topo_handler.get_switches()),
            "links": self.topo_handler.get_links()
        }

    def do_cmd(self, node_name):
        """Run the request body as a shell command on `node_name`.

        Node names appearing in the command are replaced by their IPs.
        The command is aborted (SIGINT) once the 'timeout' query parameter
        is exceeded.  Returns the captured output as HTML-ish text.
        """
        if not self.is_net_started:
            response.status = 403
        else:
            timeout = float(request.query['timeout'])
            args = request.body.read()
            node = self.net[node_name]
            rest = args.split(' ')
            # Substitute IP addresses for node names in command
            # If updateIP() returns None, then use node name
            rest = [
                self.net[arg].defaultIntf().updateIP() or arg
                if arg in self.net else arg for arg in rest
            ]
            rest = ' '.join(rest)
            # Run cmd on node:
            node.sendCmd(rest)
            output = ''
            init_time = time.time()
            while node.waiting:
                exec_time = time.time() - init_time
                # abort once the caller-supplied timeout is exceeded
                if exec_time > timeout:
                    break
                data = node.monitor(timeoutms=1000)
                output += data
            # Force process to stop if not stopped in timeout
            if node.waiting:
                node.sendInt()
                time.sleep(0.5)
                data = node.monitor(timeoutms=1000)
                output += data
                node.waiting = False
            # NOTE(review): these two replace() calls are no-ops (each maps
            # a character to itself); they look like HTML escaping
            # ('&lt;'/'&gt;') whose entities were lost somewhere -- confirm
            # against the original source.
            output = output.replace('<', '<')
            output = output.replace('>', '>')
            output = output.replace('\n', '<br>')
            return output

    def get_events_count(self):
        """Per-dpid charge_events counts between 'start' and 'end'."""
        start = request.query['start']
        end = request.query['end']
        db_query = 'select dpid, count(*) as events from charge_events where ts >= %s and ts <= %s group by dpid'
        return self.jsonify_query(db_query, start, end)

    def get_events_total(self, dpid):
        """Total number of send_events rows for `dpid`."""
        db_query = 'SELECT count(*) as total FROM send_events WHERE dpid = %s'
        return self.jsonify_query(db_query, dpid)

    def get_events_page(self, dpid):
        """One page of send_events for `dpid` ('page'/'perpage' query)."""
        perpage = int(request.query['perpage'])
        startat = int(request.query['page']) * perpage
        db_query = self.paginate(
            'SELECT from_mac, to_mac, from_port, to_port, ts FROM send_events WHERE dpid = %s'
        )
        return self.jsonify_query(db_query, dpid, perpage, startat)

    def get_charge_state(self):
        """Full charge_state table (403 while the net is stopped)."""
        if not self.is_net_started:
            response.status = 403
        else:
            return self.jsonify_query('SELECT * FROM charge_state')

    def get_charge_total(self, dpid):
        """Total number of charge_events rows for `dpid`."""
        db_query = 'SELECT count(*) as total FROM charge_events WHERE dpid = %s'
        return self.jsonify_query(db_query, dpid)

    def get_charge_events(self, dpid):
        """One page of charge_events for `dpid` ('page'/'perpage' query)."""
        perpage = int(request.query['perpage'])
        startat = int(request.query['page']) * perpage
        return self.jsonify_query(
            self.paginate('SELECT * FROM charge_events WHERE dpid = %s'),
            dpid, perpage, startat)

    def get_initial_charge_level(self):
        """Return the charge level new switches are seeded with."""
        return {"charge": self.initial_charge_level}

    def set_initial_charge_level(self):
        """Set the seed charge level from request JSON and echo it back."""
        charge = request.json['charge']
        self.initial_charge_level = charge
        return {"charge": self.initial_charge_level}

    def paginate(self, query):
        """Append ORDER BY/LIMIT/OFFSET placeholders to `query`."""
        return query + ' ORDER BY id DESC LIMIT %s OFFSET %s;'

    def jsonify_query(self, db_query, *args):
        """Execute a parameterized query and return rows as a JSON array."""
        self.cursor.execute(db_query, args)
        hdrs = [x[0] for x in self.cursor.description]
        rv = self.cursor.fetchall()
        res = []
        for el in rv:
            res.append(dict(zip(hdrs, el)))
        response.content_type = 'application/json'
        return json.dumps(res, indent=4, sort_keys=True, default=str)
def __init__(self):
    """Create a fat-tree network"""
    # k=4 fat-tree: 4 core switches on top of 4 pods; each pod holds
    # 2 edge + 2 aggregation switches and 4 hosts (two per edge switch).
    net = Mininet(controller=RemoteController)
    ctrl = RemoteController('c0', ip='127.0.0.1', port=6633)
    info('*** Adding controller ***\n')
    net.addController(ctrl)

    # Core layer, shared by every pod.
    info(' *** Core switches ***\n')
    core = [net.addSwitch('cs%d' % idx) for idx in range(4)]

    # Build the pods.  Creation and link order exactly mirrors the
    # hand-unrolled original (pod 0..3, switches before hosts, host links
    # before edge-aggr links before core-aggr links) so that interface
    # numbering and dpid assignment are unchanged.
    for pod in range(4):
        base = 4 * pod
        info('*** Pod - %d ***\n' % pod)
        info('*** Adding switches ***\n')
        edges = [net.addSwitch('edge_sw_%d' % (base + k)) for k in (0, 1)]
        aggrs = [net.addSwitch('aggr_sw_%d' % (base + k)) for k in (2, 3)]

        info('*** Adding hosts ***\n')
        hosts = []
        for k in range(4):
            # k 0-1 -> "lower left" subnet 10.<pod>.0.x,
            # k 2-3 -> "lower right" subnet 10.<pod>.1.x
            addr = '10.%d.%d.%d' % (pod, k // 2, 2 + k % 2)
            hosts.append(net.addHost('h%d' % (base + k), ip=addr))

        info('*** Creating links ***\n')
        # Two hosts per edge switch.
        for k, host in enumerate(hosts):
            net.addLink(host, edges[k // 2])
        # Full bipartite edge <-> aggregation mesh inside the pod.
        for edge in edges:
            for aggr in aggrs:
                net.addLink(edge, aggr)
        # cs0/cs1 attach to the first aggregation switch, cs2/cs3 to the
        # second -- same pairing as the original.
        for idx, cs in enumerate(core):
            net.addLink(cs, aggrs[idx // 2])

    net.build()
    net.start()
    CLI(net)
    net.stop()
def createNetwork():
    """Build a two-switch testbed joined by two router hosts (rh, ri), shape
    every interface with TC, start the experiment helpers, drop into the
    Mininet CLI, and tear the network down on exit.

    Relies on module-level names defined elsewhere in this file:
    Mininet, TCIntf, info, CLI, time and the helper start_nodes().
    Must run as root (ifconfig / tc / sysctl-style commands on the hosts).
    """
    # --- experiment parameters -------------------------------------------
    # send rate at each link in Mbps
    bwg = 1    # in Mbps (non-bottleneck links)
    bwbn = 1   # in Mbps (bottleneck links)
    mqs = 100  # max queue size of interfaces (packets)
    dly = '2.5ms'  # delay applied on ri's interfaces (both directions)
    apps = 4   # number of other UDP applications = number of DRR classes - 1 [MAXIMUM = 9(!)]
    qlim = int( mqs / (apps + 1))  # limit of queue in DRR is mqs divided by the number of apps + 1
    # NOTE(review): the next line discards the computed qlim above; the
    # original author marked it as a tryout to be deleted — confirm intent.
    qlim = 5  # TODO delete (just a tryout)
    appquantum = 1500  # quantum for UDP traffic
    quicquantum = 55   # quantum for QUIC traffic

    # --- topology ---------------------------------------------------------
    # create empty network; TCIntf lets us call intf.config(bw=..., delay=...)
    net = Mininet(intf=TCIntf)
    info('\n*** Adding controller\n')
    net.addController('c0')  # is it ok ?

    # add hosts: ht/hu behind sw1 (10.10.0.0/24), it/iu behind sw2
    # (10.20.0.0/24); rh and ri act as the routers between the two subnets.
    ht = net.addHost('ht', ip='10.10.0.1/24')
    hu = net.addHost('hu', ip='10.10.0.2/24')
    it = net.addHost('it', ip='10.20.0.1/24')
    iu = net.addHost('iu', ip='10.20.0.2/24')
    rh = net.addHost('rh', ip='10.10.0.10/24')
    ri = net.addHost('ri', ip='10.20.0.20/24')

    info('\n** Adding Switches\n')
    # Adding 2 switches to the network
    sw1 = net.addSwitch('sw1')
    sw2 = net.addSwitch('sw2')

    info('\n** Creating Links \n')
    # create links between the nodes; explicit intfName* so the ifconfig/tc
    # commands below can address the interfaces by name
    link_ht_sw1 = net.addLink(ht, sw1)
    link_hu_sw1 = net.addLink(hu, sw1, intfName1='hu-eth0')
    link_rh_sw1 = net.addLink(rh, sw1, intfName1='rh-eth0')
    link_it_sw2 = net.addLink(it, sw2)
    link_iu_sw2 = net.addLink(iu, sw2)
    link_ri_sw2 = net.addLink(ri, sw2, intfName1='ri-eth0')
    link_rh_ri = net.addLink(rh, ri, intfName1='rh-eth1', intfName2='ri-eth1')

    # --- traffic shaping --------------------------------------------------
    # set bandwidth on the host-side interfaces (intf1) ...
    link_ht_sw1.intf1.config(bw=bwbn, max_queue_size=mqs)
    link_hu_sw1.intf1.config(bw=bwbn, max_queue_size=mqs)
    link_rh_sw1.intf1.config( bw=bwbn, max_queue_size=mqs )  # max_queue_size is hardcoded low to prevent bufferbloat / too-high queuing delays
    link_it_sw2.intf1.config(bw=bwg, max_queue_size=mqs)
    link_iu_sw2.intf1.config(bw=bwg, max_queue_size=mqs)
    link_ri_sw2.intf1.config(bw=bwg, max_queue_size=mqs, delay=dly)  # delay is set at ri on both interfaces
    link_rh_ri.intf1.config( bw=bwg, max_queue_size=mqs)  # loss is set at rh on its interface to ri only
    # ... and on the switch-side interfaces (intf2)
    link_ht_sw1.intf2.config(bw=bwbn, max_queue_size=mqs)
    link_hu_sw1.intf2.config(bw=bwbn, max_queue_size=mqs)
    link_rh_sw1.intf2.config(bw=bwbn, max_queue_size=mqs)
    link_it_sw2.intf2.config(bw=bwg, max_queue_size=mqs)
    link_iu_sw2.intf2.config(bw=bwg, max_queue_size=mqs)
    link_ri_sw2.intf2.config(bw=bwg, max_queue_size=mqs)
    link_rh_ri.intf2.config(bw=bwg, max_queue_size=mqs, delay=dly)  # delay is set at ri on both interfaces

    net.start()

    # --- host configuration ----------------------------------------------
    info('\n*** Configuring hosts\n')
    # reconfiguring multi-interface hosts to prevent Mininet's strange
    # initialisation behaviours; 10.12.0.0/24 is the rh<->ri transit net
    rh.cmd( 'ifconfig rh-eth1 10.12.0.10 netmask 255.255.255.0' )
    rh.cmd('ifconfig rh-eth0 10.10.0.10 netmask 255.255.255.0')
    rh.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward' )  # enable forwarding at routers
    ri.cmd( 'ifconfig ri-eth1 10.12.0.20 netmask 255.255.255.0' )
    ri.cmd('ifconfig ri-eth0 10.20.0.20 netmask 255.255.255.0')
    ri.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward' )  # enable forwarding at routers

    # configure host default gateways (each side points at its router)
    ht.cmd('ip route add default via 10.10.0.10')
    hu.cmd('ip route add default via 10.10.0.10')
    it.cmd('ip route add default via 10.20.0.20')
    iu.cmd('ip route add default via 10.20.0.20')
    # configure router routing tables (each router defaults to the other)
    rh.cmd('ip route add default via 10.12.0.20')
    ri.cmd('ip route add default via 10.12.0.10')

    # weiyu: pre-create capture files for the packet traces
    iu.cmd('touch server.pcap')
    hu.cmd('touch client.pcap')
    # drop the TC-installed root qdisc on rh-eth1 so start_nodes() can
    # install its own queueing discipline there
    rh.cmd('tc qdisc del dev rh-eth1 root')

    # hand over to the experiment driver defined elsewhere in this file
    start_nodes(rh, ri, iu, hu, mqs, it, ht, apps, appquantum, quicquantum, qlim)

    # --- experiment actions ----------------------------------------------
    # disable TSO/segmentation offload on the default TCP sender; needs to be
    # done on other hosts too if sending large TCP files from other nodes
    it.cmd( 'ethtool -K it-eth0 tx off sg off tso off' )
    time.sleep(5)
    # snapshot the qdisc/class state mid-run for later inspection
    hu.cmd('sudo tc -s -g qdisc show dev hu-eth0 >> tc.log')
    hu.cmd('sudo tc -s -g class show dev hu-eth0 >> tc.log')
    hu.cmd('echo "class show done (running).\n" >> tc.log')

    # Enable the mininet> prompt
    info('\n*** Running CLI\n')
    CLI(net)

    # final qdisc/class snapshot after the interactive session
    hu.cmd('sudo tc -s -g class show dev hu-eth0 >> tc.log')
    hu.cmd('echo "class show done (end).\n" >> tc.log')
    # stops the simulation
    net.stop()
def main():
    """Bring up a P4 (bmv2) Mininet topology, tune the hosts, program each
    switch via its thrift CLI, open tcpdump/ping terminals and run the CLI.

    Relies on module-level names defined elsewhere in this file:
    read_topo, MyTopo, P4Host, P4Switch, _THRIFT_BASE_PORT, the parsed
    `args` namespace (behavioral_exe, json, cli), makeTerm, CLI, sleep,
    Mininet and subprocess.
    """
    nb_hosts, nb_switches, links = read_topo()
    topo = MyTopo(args.behavioral_exe, args.json, nb_hosts, nb_switches, links)
    net = Mininet(topo=topo,
                  host=P4Host,
                  switch=P4Switch,
                  autoStaticArp=True,
                  controller=None,
                  autoSetMacs=True)
    net.start()

    # Per-host tuning: disable NIC offloads and IPv6 so the P4 pipeline sees
    # plain, unsegmented IPv4 frames; pin TCP congestion control to reno.
    for n in xrange(nb_hosts):
        h = net.get('h%d' % (n + 1))
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload eth0 %s off" % off
            print cmd
            h.cmd(cmd)
        print "disable ipv6"
        h.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv4.tcp_congestion_control=reno")
        # suppress ICMP unreachables that would otherwise confuse the probes
        h.cmd(
            "iptables -I OUTPUT -p icmp --icmp-type destination-unreachable -j DROP"
        )
        h.cmd("sh h%d.vlan" % (n + 1))  # Create interface eth0.16 and add static ARP entries

    sleep(1)

    # Per-switch programming via the bmv2 thrift CLI (one thrift port per
    # switch, offset from _THRIFT_BASE_PORT).
    for i in xrange(nb_switches):
        # Create mirror_id j to clone packets towards j-th port
        for j in range(len(topo.ports['s' + str(i + 1)])):
            cmd = [
                'echo "mirroring_add',
                str(j + 1),
                str(j + 1), '" | ~/bmv2/targets/simple_switch/sswitch_CLI',
                args.json,
                str(_THRIFT_BASE_PORT + i)
            ]
            # NOTE(review): shell=True with a pieced-together command line —
            # fine for a trusted lab script, not for untrusted input.
            print " ".join(cmd)
            subprocess.call(" ".join(cmd), shell=True)
        # added "--pre SimplePreLAG" refer to https://github.com/p4lang/behavioral-model
        cmd = [
            args.cli, "--json", args.json, "--thrift-port",
            str(_THRIFT_BASE_PORT + i), "--pre", "SimplePreLAG"
        ]
        # Feed the per-switch table entries (commands_<n>.txt) to the CLI.
        with open("commands_%d.txt" % (i + 1), "r") as f:
            print " ".join(cmd)
            try:
                output = subprocess.check_output(cmd, stdin=f)
                print output
            except subprocess.CalledProcessError as e:
                print e
                print e.output

    sleep(1)
    print "Ready !"

    # Open xterms with live tcpdump on the interesting links plus a ping
    # from h1, so link-failure/redirect behaviour can be watched live.
    makeTerm(
        net['s1'],
        title="Redirect node - Primary path",
        cmd=
        "tcpdump -n -i s1-eth2 -Uw - | tcpdump -en -r - vlan;echo;echo;echo Last command: \x1B[32m'tcpdump -n -i s1-eth2 -Uw - | tcpdump -en -r - vlan'\x1B[0m; bash"
    )
    makeTerm(
        net['s1'],
        title="Redirect node - Backup path",
        cmd=
        "tcpdump -n -i s1-eth3 -Uw - | tcpdump -en -r - vlan;echo;echo;echo Last command: \x1B[32m'tcpdump -n -i s1-eth3 -Uw - | tcpdump -en -r - vlan'\x1B[0m; bash"
    )
    makeTerm(
        net['s2'],
        title="Detect node - Detected link",
        cmd=
        "tcpdump -n -i s2-eth2 -Uw - | tcpdump -en -r - vlan;echo;echo;echo Last command: \x1B[32m'tcpdump -n -i s2-eth2 -Uw - | tcpdump -en -r - vlan'\x1B[0m; bash"
    )
    makeTerm(
        net['h1'],
        title="Host H1",
        cmd=
        "ping 192.168.100.2;echo;echo;echo Last command: \x1B[32mping 192.168.100.2 -i 1\x1B[0m; bash"
    )
    CLI(net)
    net.stop()
def defineNetwork():
    """Build a three-cluster service-chaining testbed (12 OVS switches, user
    VMs, DPI/WAN-accelerator/traffic-control VNFs and virtual routers),
    attach everything to a remote controller, configure addressing/bridging
    and static routes, then run the Mininet CLI until exit.

    Relies on module-level names: sys, Mininet, TCLink, RemoteController,
    info, CLI, quietRun, sleep. Must run as root (ovs-vsctl / ovs-ofctl /
    brctl / ip / route commands).
    """
    if len(sys.argv) < 2:
        print "Missing paramenter: python hotmboxes16-topo-2016-02-08.py <debug=1|0>"
        sys.exit()
    # comment
    debug = sys.argv[1]
    # print some useful information
    net = Mininet(topo=None, build=False, link=TCLink)
    net.addController(name='c0',
                      controller=RemoteController,
                      # ip='192.168.224.133',
                      port=6633)
    #net = Mininet(controller=RemoteController, link=TCLink, build=False, xterms=True)
    info("*** Create an empty network and add nodes and swith to it *** \n")
    #net = Mininet(controller=RemoteController, link=TCLink, build=False) #MyPOXController
    info("\n*** Adding Controller: Controller will be external *** \n")

    # BUILDING CLUSTER 1: s1, s2, s3, s4 and vmu1, vmu2, dpi, wana, tc
    # Each switch is deleted/re-created with ovs-vsctl so it is managed
    # directly (ports are re-added by hand further below) and STP is off.
    info("\n*** Creating Switch *** \n")
    s1 = net.addSwitch('s1')  # this should be equivalent to s1 = net.addSwitch('s1', OVSSwitch)
    s1.cmd( 'ovs-vsctl del-br ' + s1.name )
    s1.cmd( 'ovs-vsctl add-br ' + s1.name )
    s1.cmd( 'ovs-vsctl set Bridge '+ s1.name + ' stp_enable=false' )  # Disabling STP
    s2 = net.addSwitch('s2')
    s2.cmd( 'ovs-vsctl del-br ' + s2.name )
    s2.cmd( 'ovs-vsctl add-br ' + s2.name )
    s2.cmd( 'ovs-vsctl set Bridge '+ s2.name + ' stp_enable=false' )  # Disabling STP
    s3 = net.addSwitch('s3')
    s3.cmd( 'ovs-vsctl del-br ' + s3.name )
    s3.cmd( 'ovs-vsctl add-br ' + s3.name )
    s3.cmd( 'ovs-vsctl set Bridge '+ s3.name + ' stp_enable=false' )  # Disabling STP
    s4 = net.addSwitch('s4')
    s4.cmd( 'ovs-vsctl del-br ' + s4.name )
    s4.cmd( 'ovs-vsctl add-br ' + s4.name )
    s4.cmd( 'ovs-vsctl set Bridge '+ s4.name + ' stp_enable=false' )  # Disabling STP

    info("\n*** Creating VM-User 1 *** \n")
    vmu1 = net.addHost('VMU1')
    info("\n*** Creating VM-User 2 *** \n")
    vmu2 = net.addHost('VMU2')
    info("\n*** Creating DPI *** \n")
    dpi = net.addHost('DPI')
    info("\n*** Creating WAN A. *** \n")
    wana = net.addHost('WANA')
    info("\n*** Creating TC *** \n")
    tc = net.addHost('TC')
    info("\n*** Creating Virtual Router 1 *** \n")
    vr1 = net.addHost('VR1')

    info("\n*** Creating Links on Cluster 1 *** \n")
    # wana and tc are linked twice on purpose: they bridge two interfaces
    # (eth0/eth1) as bump-in-the-wire VNFs — see the brctl section below.
    net.addLink(vmu1, s1, bw=100)
    net.addLink(vmu2, s1, bw=100)
    net.addLink(dpi, s1, bw=100)
    net.addLink(wana, s1, bw=100)
    net.addLink(wana, s1, bw=100)
    net.addLink(tc, s1, bw=100)
    net.addLink(tc, s1, bw=100)
    net.addLink(s1, s2, bw=100)
    net.addLink(s1, s3, bw=100)
    net.addLink(s2, s4, bw=100)
    net.addLink(s3, s4, bw=100)
    net.addLink(vr1, s4, bw=100)

    # BUILDING CLUSTER 2: s5, s6, s7, s8 and h2, wana2, vr2, vr3
    s5 = net.addSwitch('s5')
    s5.cmd( 'ovs-vsctl del-br ' + s5.name )
    s5.cmd( 'ovs-vsctl add-br ' + s5.name )
    s5.cmd( 'ovs-vsctl set Bridge '+ s5.name + ' stp_enable=false' )  # Disabling STP
    s6 = net.addSwitch('s6')
    s6.cmd( 'ovs-vsctl del-br ' + s6.name )
    s6.cmd( 'ovs-vsctl add-br ' + s6.name )
    s6.cmd( 'ovs-vsctl set Bridge '+ s6.name + ' stp_enable=false' )  # Disabling STP
    s7 = net.addSwitch('s7')
    s7.cmd( 'ovs-vsctl del-br ' + s7.name )
    s7.cmd( 'ovs-vsctl add-br ' + s7.name )
    s7.cmd( 'ovs-vsctl set Bridge '+ s7.name + ' stp_enable=false' )  # Disabling STP
    s8 = net.addSwitch('s8')
    s8.cmd( 'ovs-vsctl del-br ' + s8.name )
    s8.cmd( 'ovs-vsctl add-br ' + s8.name )
    s8.cmd( 'ovs-vsctl set Bridge '+ s8.name + ' stp_enable=false' )  # Disabling STP

    info("\n*** Creating Host 2 *** \n")
    h2 = net.addHost('H2')
    info("\n*** Creating WAN A. 2 *** \n")
    wana2 = net.addHost('WANA2')
    info("\n*** Creating VR2 *** \n")
    vr2 = net.addHost('VR2')
    info("\n*** Creating VR3 *** \n")
    vr3 = net.addHost('VR3')

    info("\n*** Creating Links on Cluster 2 *** \n")
    net.addLink(vr1, vr2, bw=100)  # inter-cluster link 1<->2
    net.addLink(vr2, s5, bw=100)
    net.addLink(s5, s6, bw=100)
    net.addLink(s5, s8, bw=100)
    net.addLink(h2, s8, bw=100)
    net.addLink(wana2, s6, bw=100)
    net.addLink(wana2, s6, bw=100)
    net.addLink(s6, s7, bw=100)
    net.addLink(s8, s7, bw=100)
    net.addLink(s7, vr3, bw=100)

    # BUILDING CLUSTER 3: s9, s10, s11, s12 and h3, wana3, vr4
    s9 = net.addSwitch('s9')
    s9.cmd( 'ovs-vsctl del-br ' + s9.name )
    s9.cmd( 'ovs-vsctl add-br ' + s9.name )
    s9.cmd( 'ovs-vsctl set Bridge '+ s9.name + ' stp_enable=false' )  # Disabling STP
    s10 = net.addSwitch('s10')
    s10.cmd( 'ovs-vsctl del-br ' + s10.name )
    s10.cmd( 'ovs-vsctl add-br ' + s10.name )
    s10.cmd( 'ovs-vsctl set Bridge '+ s10.name + ' stp_enable=false' )  # Disabling STP
    s11 = net.addSwitch('s11')
    s11.cmd( 'ovs-vsctl del-br ' + s11.name )
    s11.cmd( 'ovs-vsctl add-br ' + s11.name )
    s11.cmd( 'ovs-vsctl set Bridge '+ s11.name + ' stp_enable=false' )  # Disabling STP
    s12 = net.addSwitch('s12')
    s12.cmd( 'ovs-vsctl del-br ' + s12.name )
    s12.cmd( 'ovs-vsctl add-br ' + s12.name )
    s12.cmd( 'ovs-vsctl set Bridge '+ s12.name + ' stp_enable=false' )  # Disabling STP

    info("\n*** Creating Host 3 *** \n")
    h3 = net.addHost('H3')
    info("\n*** Creating WAN A. 3 *** \n")
    wana3 = net.addHost('WANA3')
    info("\n*** Creating VR4 *** \n")
    vr4 = net.addHost('VR4')

    info("\n*** Creating Links on Cluster 3 *** \n")
    net.addLink(vr4, vr3, bw=100)  # inter-cluster link 2<->3
    net.addLink(vr4, s9, bw=100)
    net.addLink(s9, s10, bw=100)
    net.addLink(s9, s12, bw=100)
    net.addLink(wana3, s10, bw=100)
    net.addLink(wana3, s10, bw=100)
    net.addLink(s10, s11, bw=100)
    net.addLink(s11, s12, bw=100)
    net.addLink(s11, h3, bw=100)

    # Assign a fixed, readable MAC address to each node of cluster 1
    # (two-interface VNFs/routers get one MAC per interface).
    vmu1.setMAC("00:00:00:00:00:01", vmu1.name + "-eth0")
    vmu2.setMAC("00:00:00:00:00:02", vmu2.name + "-eth0")
    dpi.setMAC("00:00:00:00:00:03", dpi.name + "-eth0")
    wana.setMAC("00:00:00:00:00:04", wana.name + "-eth0")
    wana.setMAC("00:00:00:00:00:05", wana.name + "-eth1")
    tc.setMAC("00:00:00:00:00:06", tc.name + "-eth0")
    tc.setMAC("00:00:00:00:00:07", tc.name + "-eth1")
    vr1.setMAC("00:00:00:00:00:08", vr1.name + "-eth0")
    vr1.setMAC("00:00:00:00:00:09", vr1.name + "-eth1")
    # Assign a MAC address to each node of cluster 2
    vr2.setMAC("00:00:00:00:00:0A", vr2.name + "-eth0")
    vr2.setMAC("00:00:00:00:00:0B", vr2.name + "-eth1")
    h2.setMAC("00:00:00:00:00:0C", h2.name + "-eth0")
    wana2.setMAC("00:00:00:00:00:0D", wana2.name + "-eth0")
    wana2.setMAC("00:00:00:00:00:0E", wana2.name + "-eth1")
    vr3.setMAC("00:00:00:00:00:0F", vr3.name + "-eth0")
    vr3.setMAC("00:00:00:00:00:10", vr3.name + "-eth1")
    # Assign a MAC address to each node of cluster 3
    vr4.setMAC("00:00:00:00:00:11", vr4.name + "-eth0")
    vr4.setMAC("00:00:00:00:00:12", vr4.name + "-eth1")
    wana3.setMAC("00:00:00:00:00:13", wana3.name + "-eth0")
    wana3.setMAC("00:00:00:00:00:14", wana3.name + "-eth1")
    h3.setMAC("00:00:00:00:00:15", h3.name + "-eth0")

    # Disabling IPv6 on every host (experiment is IPv4-only)
    for host in net.hosts:
        print 'Going to disable IPv6 on ' + host.name
        host.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
        host.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
        host.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')

    # Re-add every Mininet-created interface to its (re-created) OVS bridge
    for switch in net.switches:
        for intf in switch.intfs.values():
            switch.cmd( 'ovs-vsctl add-port ' + switch.name + ' %s' % intf )
            print "Eseguito comando: ovs-vsctl add-port ", switch.name, " ", intf
    #info("\n*** Starting Network using Open vSwitch and remote controller*** \n")

    # Set the controller for the switch (remote controller, fixed address)
    for switch in net.switches:
        switch.cmd('ovs-vsctl set-controller ' + switch.name + ' tcp:192.168.56.11:6633')
    info( '\n*** Waiting for switch to connect to controller' )
    while 'is_connected' not in quietRun( 'ovs-vsctl show' ):
        sleep( 1 )
        info( '.' )
    info('\n')

    # Creating a Linux Bridge on each host
    nhosts = len(net.hosts)
    print 'Total number of hosts: ' + str(nhosts)
    # `count` tracks the 1-based position of the current host in net.hosts
    # and doubles as the last octet of the address assigned to it.
    count = 1
    net.start()
    info('\n*** Going to take down default configuration ...\n')
    info('\n*** ... and creating Linux bridge on WANA and TC, as well as configuring interfaces \n')
    for host in net.hosts:
        print 'Deleting ip address on ' + host.name + '-eth0 interface ...'
        host.cmd('ip addr del ' + host.IP(host.name + '-eth0') + '/8 dev ' + host.name + '-eth0')
        print 'Deleting entry in IP routing table on ' + host.name
        host.cmd('ip route del 10.0.0.0/8')
        print "Going to configure new IP"
        if host.name == 'WANA' or host.name == 'WANA2' or host.name == 'WANA3' or host.name == 'TC':
            # VNFs case: bridge eth0 and eth1 so the VNF is transparent
            print "Host with 2 interfaces: " + host.name
            host.cmd('brctl addbr br-' + host.name)
            host.cmd('brctl addif br-' + host.name + ' ' + host.name + '-eth0')
            host.cmd('brctl addif br-' + host.name + ' ' + host.name + '-eth1')
            if host.name == 'WANA' or host.name == 'TC':
                host.cmd('ip addr add 192.168.1.' + str(count) + '/24 dev br-' + host.name)
            elif host.name == 'WANA2':
                host.cmd('ip addr add 192.168.2.' + str(count) + '/24 dev br-' + host.name)
            else:
                host.cmd('ip addr add 192.168.3.' + str(count) + '/24 dev br-' + host.name)
            host.cmd('ip link set br-' + host.name + ' up')
            print "LB configured!"
            host.cmd('sysctl -w net.ipv4.ip_forward=1')
            print "IP Forwarding enabled!"
        elif host.name == 'VMU1' or host.name == 'VMU2' or host.name == 'DPI' or host.name == 'H2' or host.name == 'H3':
            # Plain end-hosts: one address on eth0, subnet per cluster
            if host.name == 'VMU1' or host.name == 'VMU2' or host.name == 'DPI':
                # Machine on cluster 1
                host.setIP("192.168.1." + str(count), 24, host.name + "-eth0")
            elif host.name == 'H2':
                # Machine on cluster 2
                host.setIP("192.168.2." + str(count), 24, host.name + "-eth0")
            else:
                # Machine on cluster 3
                host.setIP("192.168.3." + str(count), 24, host.name + "-eth0")
            print "[CURRENT-CHECK] IP: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth0')
        elif host.name == 'VR1' or host.name == 'VR3':
            # Routers facing "down" into a cluster on eth0 and "up" into a
            # /30 transit net (10.0.0.x) on eth1; the peer router's eth0 is
            # configured here too via positional indexing into net.hosts.
            # NOTE(review): the count-based offsets assume a fixed host
            # creation order — fragile if hosts are added/removed above.
            if host.name == 'VR1':
                host.setIP("192.168.1." + str(count), 24, host.name + "-eth0")
            elif host.name == 'VR3':
                host.setIP("192.168.2." + str(count), 24, host.name + "-eth0")
            net.hosts[count - 1].setIP("10.0.0." + str(count - 5), 30, net.hosts[count - 1].name + "-eth1")
            net.hosts[count + 2].setIP("10.0.0." + str(count - 4), 30, net.hosts[count + 2].name + "-eth0")  # also configuring VR2-eth0 and VR4-eth0
            print net.hosts[count - 1].name + "-eth1 interface has been configured!"
            print "[Checking VR IP] " + net.hosts[count - 1].IP(host.name + '-eth1')
            net.hosts[count - 1].cmd('sysctl -w net.ipv4.ip_forward=1')  # enabled on VR1
            print "On VR node: IP Forwarding enabled!"
        else:
            # VR2 or VR4 case (their eth0 was configured by the branch above)
            if host.name == 'VR2':
                host.setIP("192.168.2." + str(count), 24, host.name + "-eth1")
            else:
                # VR4
                host.setIP("192.168.3." + str(count), 24, host.name + "-eth1")
            net.hosts[count - 1].cmd('sysctl -w net.ipv4.ip_forward=1')
            print "On VR node: IP Forwarding enabled!"
        count = count + 1
        print "\n"

    # ARP storm avoidance rules: drop broadcast ARP arriving on the second
    # leg of the two-port VNFs so bridged frames do not loop.
    for switch in net.switches:
        if switch.name == 's1':
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=4,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=5,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=6,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=7,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
        elif switch.name == 's6' or switch.name == 's10':
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=2,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
            switch.cmd('ovs-ofctl add-flow ' + switch.name + ' in_port=3,arp,dl_dst=FF:FF:FF:FF:FF:FF,actions=drop')
    for switch in net.switches:
        print "Rules installed on switch " + switch.name + ": " + switch.cmdPrint('ovs-ofctl dump-flows ' + switch.name)

    print "Configuring default gw on each host.. TODO"
    count = 1
    for host in net.hosts:
        print "Adding default gw ..."
        # The triple-quoted block below is the author's disabled earlier
        # routing scheme, kept verbatim for reference.
        '''if host.name != 'VR' and host.name != 'H1' and host.name != 'WANA' and host.name != 'TC':
        host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 2].IP(net.hosts[nhosts - 2].name + '-eth0'))
        elif host.name == 'TC' or host.name == 'WANA':
        print "Default GW manually configured"
        host.cmd('route add default gw ' + net.hosts[nhosts - 2].IP(net.hosts[nhosts - 2].name + '-eth0'))
        else: #H1 case
        host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 2].IP(net.hosts[nhosts - 2].name + '-eth1'))'''
        # Static routes: gateways are located by fixed offsets from the end
        # of net.hosts (nhosts - 8 .. nhosts - 1 are the VR* hosts).
        if host.name == 'VMU1' or host.name == 'VMU2':
            host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 8].IP(net.hosts[nhosts - 8].name + '-eth0'))
        elif host.name == 'H2':
            host.cmd('route add -net 192.168.1.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 5].IP(net.hosts[nhosts - 5].name + '-eth1'))
            host.cmd('route add -net 192.168.3.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 4].IP(net.hosts[nhosts - 4].name + '-eth0'))
        elif host.name == 'H3':
            host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 1].IP(net.hosts[nhosts - 1].name + '-eth1'))
        elif host.name == 'VR1':
            host.cmd('route add -net 192.168.2.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 5].IP(net.hosts[nhosts - 5].name + '-eth0'))
            host.cmd('route add -net 192.168.3.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 5].IP(net.hosts[nhosts - 5].name + '-eth0'))
        elif host.name == 'VR2':
            host.cmd('route add -net 192.168.1.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 8].IP(net.hosts[nhosts - 8].name + '-eth1'))
            host.cmd('route add -net 192.168.3.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 4].IP(net.hosts[nhosts - 4].name + '-eth0'))
        elif host.name == 'VR3':
            host.cmd('route add -net 192.168.1.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 5].IP(net.hosts[nhosts - 5].name + '-eth1'))
            host.cmd('route add -net 192.168.3.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 1].IP(net.hosts[nhosts - 1].name + '-eth0'))
        elif host.name == 'VR4':
            host.cmd('route add -net 192.168.1.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 4].IP(net.hosts[nhosts - 4].name + '-eth1'))
            host.cmd('route add -net 192.168.2.0 netmask 255.255.255.0 gw ' + net.hosts[nhosts - 4].IP(net.hosts[nhosts - 4].name + '-eth1'))
        else:
            print "Host " + host.name + ": routing table currently not configured"

    info('... running CLI \n***')
    CLI(net)
    info('\n')
    info('... stopping Network ***\n')
    net.stop()
def MininetTopo(argv):
    """Build a 3-switch / 4-host DHCP testbed with one DHCP server (h50),
    start per-interface capture helpers, cycle dhclient on every host, then
    run the Mininet CLI.

    Relies on module-level names: Mininet, OVSSwitch, RemoteController,
    info, CLI. The `argv` parameter is accepted but not used in this body.
    External helpers `~/bin/start_dhcp` and `~/bin/dump` must exist on the
    machine — TODO confirm what they do (presumably screen-based capture,
    given the final `kill \`pidof SCREEN\``).
    """
    # net = Mininet(controller=RemoteController, link=TCLink, switch=OVSKernelSwitch)
    net = Mininet()
    info("Create host nodes.\n")
    # 'no ip defined/8' is an intentionally invalid address: the hosts are
    # expected to obtain a real one via DHCP below.
    h10 = net.addHost('h10', ip='no ip defined/8', mac='00:00:00:00:00:01')
    h20 = net.addHost('h20', ip='no ip defined/8', mac='00:00:00:00:00:02')
    h30 = net.addHost('h30', ip='no ip defined/8', mac='00:00:00:00:00:03')
    h40 = net.addHost('h40', ip='no ip defined/8', mac='00:00:00:00:00:04')
    # NOTE(review): `hosts` holds names (strings), not node objects; it is
    # only referenced by the commented-out loop below.
    hosts = ['h10', 'h20', 'h30', 'h40']
    dhcp_server = net.addHost('h50', ip='192.168.1.1/24')

    info("Create switch node.\n")
    s10 = net.addSwitch('s10', switch=OVSSwitch, failMode='secure', protocols='OpenFlow13')
    s20 = net.addSwitch('s20', switch=OVSSwitch, failMode='secure', protocols='OpenFlow13')
    s30 = net.addSwitch('s30', switch=OVSSwitch, failMode='secure', protocols='OpenFlow13')

    info("Create Links.\n")
    # addLink(node1, node2, port1, port2): explicit port numbers so the
    # controller's flow rules can reference stable port IDs.
    net.addLink(h10, s10, 0, 1)
    net.addLink(h20, s10, 0, 2)
    net.addLink(s10, s20, 3, 1)
    net.addLink(s20, s30, 2, 4)
    net.addLink(h30, s30, 0, 1)
    net.addLink(h40, s30, 0, 2)
    net.addLink(dhcp_server, s30, 0, 3)  # DHCP must be h50-eth0
    # net.addLink(dhcp_server,h1,0,0)

    info("Create Controller.\n")
    c0 = net.addController(name='c0',
                           controller=RemoteController,
                           ip='127.0.0.1',
                           port=6633)

    info("Build and start network.\n")
    net.build()
    # Start switches individually against the remote controller.
    s10.start([c0])
    s20.start([c0])
    s30.start([c0])

    # start dhcp server
    dhcp_server.cmd("~/bin/start_dhcp")
    # for host in hosts:
    #     host.cmd("~/bin/dump")
    #     host.cmd("dhclient -r")
    #     host.cmd("dhclient")

    # Start capture helper on every switch port ...
    print("switch dump set")
    s10.cmd("~/bin/dump s10-eth1")
    s10.cmd("~/bin/dump s10-eth2")
    s10.cmd("~/bin/dump s10-eth3")
    s20.cmd("~/bin/dump s20-eth1")
    s20.cmd("~/bin/dump s20-eth2")
    s30.cmd("~/bin/dump s30-eth1")
    s30.cmd("~/bin/dump s30-eth2")
    s30.cmd("~/bin/dump s30-eth3")
    s30.cmd("~/bin/dump s30-eth4")
    # ... and on every host
    print("host dump set")
    h10.cmd("~/bin/dump")
    h20.cmd("~/bin/dump")
    h30.cmd("~/bin/dump")
    h40.cmd("~/bin/dump")

    # Release any stale lease, then request a fresh one from h50.
    print("dhcp release")
    h10.cmd("dhclient -r")
    h20.cmd("dhclient -r")
    h30.cmd("dhclient -r")
    h40.cmd("dhclient -r")
    print("dhcp request")
    h10.cmd("dhclient")
    h20.cmd("dhclient")
    h30.cmd("dhclient")
    h40.cmd("dhclient")

    print "start"
    # NOTE(review): net.start() after net.build() + manual switch starts —
    # Mininet will start controllers/switches again; confirm this double
    # start is intentional.
    net.start()
    info("Run mininet CLI.\n")
    CLI(net)
    # Kill the screen sessions spawned by the ~/bin/dump helpers.
    h10.cmd("kill `pidof SCREEN`")
    net.stop()
def emptyNet():
    """Build a binary-tree topology sized from sys.argv, attach one or more
    remote controllers, launch a randomized ping workload plus external
    capture processes, and stop the network after fixed wait phases.

    Expected argv layout (inferred from the indices used below — TODO
    confirm against the launcher script):
      argv[1] = number of switches, argv[2] = number of hosts,
      argv[3], argv[4] = openflow-sniffex arguments,
      argv[5] = send rate, argv[6:-1] = remote controller IPs,
      argv[-1] = output directory.

    Relies on module-level names: Mininet, RemoteController,
    OVSKernelSwitch, info, sys, np (numpy), random, subprocess, sleep.
    Written for Python 2: `/` below is integer (floor) division.
    """
    net = Mininet(controller=RemoteController, switch=OVSKernelSwitch)

    info("*** Creating (reference) controllers")
    # One controller per IP passed on the command line (argv[6:-1]).
    controllers = [None] * (len(sys.argv) - 7)
    for i in range(len(controllers)):
        print "\n remote controller : " + sys.argv[i + 6]
        controllers[i] = net.addController('c' + str(i),
                                           controller=RemoteController,
                                           ip=sys.argv[i + 6],
                                           port=6633)

    info("\n\n*** Creating switches\n")
    numberOfSwitch = int(sys.argv[1])
    switchs = [None] * numberOfSwitch
    for i in range(numberOfSwitch):
        switchs[i] = net.addSwitch('s' + str(i))
        info('s' + str(i) + ' ')

    info("\n\n*** Creating hosts\n")
    # In a complete binary tree of N switches, (N+1)/2 are leaves.
    numberOfLeave = (numberOfSwitch + 1) / 2
    numberOfHost = int(sys.argv[2])
    # Half of the hosts act as senders (paired with the other half below).
    numberOfSends = numberOfHost / 2
    hosts = [None] * numberOfHost
    for i in range(numberOfHost):
        info('h' + str(i) + ' ')
        hosts[i] = net.addHost('h' + str(i))

    info("\n\n*** Creating links\n")
    # Binary-tree wiring: switch i hangs off its parent (i-1)/2.
    for i in range(1, numberOfSwitch):
        net.addLink(switchs[i], switchs[(i - 1) / 2])
        info('(s' + str(i) + ',s' + str((i - 1) / 2) + ') ')
    info("\n")

    # Spread the hosts round-robin across the leaf switches.
    # NOTE(review): linkTo() is a legacy/deprecated Mininet API — confirm it
    # is still available in the Mininet version used here.
    leaveNumber = 0
    startLeave = numberOfSwitch - numberOfLeave
    for i in range(numberOfHost):
        switchs[leaveNumber + startLeave].linkTo(hosts[i])
        info('(s' + str(leaveNumber + startLeave) + ',h' + str(i) + ') ')
        leaveNumber = leaveNumber + 1
        if leaveNumber >= numberOfLeave:
            leaveNumber = 0

    info("\n\n*** Starting network\n\n")
    net.build()
    for controller in controllers:
        controller.start()
    for i in range(numberOfSwitch):
        switchs[i].start(controllers)
        info("s%d " % (i))
    net.start()

    info("\n\n*** gennerate send rate \n")
    # Exponential inter-send gaps with mean 1/sendRate seconds.
    sendRate = int(sys.argv[5])
    beta = 1.0 / sendRate
    Y = np.random.exponential(beta, numberOfSends)

    info("\n\n*** gennerate random send package command \n")
    directoryName = sys.argv[len(sys.argv) - 1]
    directoryName = directoryName + "/ping"
    print numberOfHost
    # Shuffle a copy of the host list and pair host i with host
    # i + numberOfSends as its ping target.
    randHost = hosts[:]
    random.shuffle(randHost)
    print len(randHost)
    cmds = [None] * numberOfSends
    for i in range(numberOfSends):
        cmds[i] = "python ping.py " + randHost[i].IP() + " " + randHost[
            i + numberOfSends].IP() + " >> " + directoryName + "/" + str(
                i).zfill(6) + ".csv &"

    # 30 s settle time before starting the capture tools.
    for i in range(30):
        sys.stdout.write("\r" + str(30 - i) + " ")
        sys.stdout.flush()
        sleep(1)

    info("\n\n*** Testing network\n\n")
    # External CPU/memory capture on a remote machine (credentials are
    # hard-coded for the lab setup) ...
    cmd2 = [
        "sshpass", "-p", "password", "ssh", "[email protected]",
        "'./start-cpu-mem-capture.sh' &"
    ]
    process2 = subprocess.Popen(cmd2)
    # ... and a local OpenFlow sniffer writing to <outdir>/cap.csv.
    cmd = [
        './openflow-sniffex', sys.argv[3], sys.argv[4],
        sys.argv[len(sys.argv) - 1] + "/cap.csv"
    ]
    process = subprocess.Popen(cmd)
    for i in range(3):
        sys.stdout.write("\r" + str(3 - i) + " ")
        sys.stdout.flush()
        sleep(1)

    info("\n\n*** Start send package\n")
    # Launch the background ping commands with exponential gaps.
    for i in range(numberOfSends):
        randHost[i].cmd(cmds[i])
        sleep(Y[i])

    # NOTE(review): this countdown prints 120..91 but only waits 30 s —
    # either the range or the displayed number looks wrong; confirm the
    # intended wait before changing it.
    for i in range(30):
        sys.stdout.write("\r" + str(120 - i) + " ")
        sys.stdout.flush()
        sleep(1)
    process.kill()
    # Final 90 s drain before teardown (process2 is left running).
    for i in range(90):
        sys.stdout.write("\r" + str(90 - i) + " ")
        sys.stdout.flush()
        sleep(1)
    # CLI( net )
    net.stop()
class Topogen(object):
    """A topology test builder helper.

    Builds a Mininet network from routers/switches registered through
    add_router()/add_switch()/add_link(), configured via the `[topogen]`
    section of `pytest.ini`. Fixes over the previous revision: the removed
    `dict.has_key()` call is replaced by the `in` operator and
    `iteritems()` by `items()` — both behave identically on Python 2 and
    also work on Python 3.
    """

    # pytest.ini section holding topogen settings
    CONFIG_SECTION = 'topogen'

    def __init__(self, cls):
        """Initialize state and build the user-provided topology class `cls`."""
        self.config = None       # ConfigParser with [topogen] settings
        self.topo = None         # Mininet Topo, set by the user topology
        self.net = None          # Mininet network, created in _init_topo
        self.gears = {}          # name -> TopoRouter/TopoSwitch
        self.routern = 1         # next auto-assigned router number
        self.switchn = 1         # next auto-assigned switch number
        self._init_topo(cls)

    @staticmethod
    def _mininet_reset():
        "Reset the mininet environment"
        # Clean up any leftover Mininet state from a previous run.
        os.system('mn -c > /dev/null 2>&1')

    def _init_topo(self, cls):
        """
        Initialize the topology provided by the user. The user topology class
        must call get_topogen() during build() to get the topogen object.
        """
        # Set the global variable so the test cases can access it anywhere
        set_topogen(self)

        # Load the default topology configurations
        self._load_config()

        # Initialize the API
        self._mininet_reset()
        cls()
        self.net = Mininet(controller=None, topo=self.topo)
        # Give every registered gear a handle on the live network.
        for gear in self.gears.values():
            gear.net = self.net

    def _load_config(self):
        """
        Loads the configuration file `pytest.ini` located at the root dir of
        topotests.
        """
        defaults = {
            'verbosity': 'info',
            'frrdir': '/usr/lib/frr',
            'quaggadir': '/usr/lib/quagga',
            'routertype': 'frr',
            'memleak_path': None,
        }
        self.config = ConfigParser.ConfigParser(defaults)
        pytestini_path = os.path.join(CWD, '../pytest.ini')
        self.config.read(pytestini_path)

    def add_router(self, name=None, cls=topotest.Router, **params):
        """
        Adds a new router to the topology. This function has the following
        options:
        * `name`: (optional) select the router name
        * `daemondir`: (optional) custom daemon binary directory
        * `routertype`: (optional) `quagga` or `frr`
        Returns a TopoRouter.

        Raises KeyError if the router name is already in use.
        """
        if name is None:
            name = 'r{}'.format(self.routern)
        if name in self.gears:
            raise KeyError('router already exists')
        params['frrdir'] = self.config.get(self.CONFIG_SECTION, 'frrdir')
        params['quaggadir'] = self.config.get(self.CONFIG_SECTION, 'quaggadir')
        params['memleak_path'] = self.config.get(self.CONFIG_SECTION,
                                                 'memleak_path')
        # `dict.has_key()` was removed in Python 3; `in` is the equivalent
        # membership test on both Python 2 and 3.
        if 'routertype' not in params:
            params['routertype'] = self.config.get(self.CONFIG_SECTION,
                                                   'routertype')

        self.gears[name] = TopoRouter(self, cls, name, **params)
        self.routern += 1
        return self.gears[name]

    def add_switch(self, name=None, cls=topotest.LegacySwitch):
        """
        Adds a new switch to the topology. This function has the following
        options:
        name: (optional) select the switch name
        Returns the switch name and number.

        Raises KeyError if the switch name is already in use.
        """
        if name is None:
            name = 's{}'.format(self.switchn)
        if name in self.gears:
            raise KeyError('switch already exists')

        self.gears[name] = TopoSwitch(self, cls, name)
        self.switchn += 1
        return self.gears[name]

    def add_link(self, node1, node2, ifname1=None, ifname2=None):
        """
        Creates a connection between node1 and node2. The nodes can be the
        following:
        * TopoGear
        * TopoRouter
        * TopoSwitch

        Raises ValueError when either argument is not a TopoGear.
        """
        if not isinstance(node1, TopoGear):
            raise ValueError('invalid node1 type')
        if not isinstance(node2, TopoGear):
            raise ValueError('invalid node2 type')

        # Auto-generate interface names when not supplied by the caller.
        if ifname1 is None:
            ifname1 = node1.new_link()
        if ifname2 is None:
            ifname2 = node2.new_link()

        # Record the link on both endpoints, then on the Mininet topology.
        node1.register_link(ifname1, node2, ifname2)
        node2.register_link(ifname2, node1, ifname1)
        self.topo.addLink(node1.name,
                          node2.name,
                          intfName1=ifname1,
                          intfName2=ifname2)

    def routers(self):
        """
        Returns the router dictionary (key is the router name and value is the
        router object itself).
        """
        # items() iterates identically to the old iteritems() on Python 2
        # and is also valid on Python 3.
        return dict((rname, gear) for rname, gear in self.gears.items()
                    if isinstance(gear, TopoRouter))

    def start_topology(self, log_level=None):
        """
        Starts the topology class. Possible `log_level`s are:
        'debug': all information possible
        'info': informational messages
        'output': default logging level defined by Mininet
        'warning': only warning, error and critical messages
        'error': only error and critical messages
        'critical': only critical messages
        """
        # If log_level is not specified use the configuration.
        if log_level is None:
            log_level = self.config.get('topogen', 'verbosity')

        # Run mininet
        setLogLevel(log_level)
        self.net.start()

    def start_router(self, router=None):
        """
        Call the router startRouter method. If no router is specified it is
        called for all registred routers.
        """
        if router is None:
            # pylint: disable=r1704
            for _, router in self.routers().items():
                router.start()
        else:
            if isinstance(router, str):
                router = self.gears[router]
            router.start()

    def stop_topology(self):
        "Stops the network topology"
        self.net.stop()

    def mininet_cli(self):
        """
        Interrupt the test and call the command line interface for manual
        inspection. Should be only used on non production code.
        """
        if not sys.stdin.isatty():
            raise EnvironmentError(
                'you must run pytest with \'-s\' in order to use mininet CLI')

        CLI(self.net)
class NetworkManager():
    """Owns a Mininet network built from `topo`.

    Starts the network, shapes every switch port with tc (htb root plus a
    RED leaf for dctcp or a bfifo leaf otherwise), tunes per-host TCP
    sysctls for the selected congestion-control policy, and restores the
    system congestion control on stop_network().
    """

    def __init__(self, topo, tcp_policy="tcp"):
        # topo is expected to expose max_bps, max_queue, switch_id,
        # host_list, ports and a _config_topo() hook (see _config_network).
        self.topo = topo
        self.net = None
        self.net_stopped = False
        self.host_ctrl_map = {}
        self.tcp_policy = tcp_policy
        # Remember the current system congestion control so stop_network()
        # can restore it.
        self.prev_cc = get_congestion_control()
        load_congestion_control(tcp_policy)
        self.start_network()

    def _apply_qdisc(self, port):
        """ Here be dragons... install the tc hierarchy on one port. """
        # Earlier qdisc experiments (hfsc, netem, codel, sfq, choke,
        # fq_codel) were removed; see version control history.
        limit = int(self.topo.max_queue)
        avg_pkt_size = 1500  # MTU packet size
        # Root htb qdisc; direct_qlen is measured in packets, hence the
        # floor division by the average packet size (`//` matches the
        # Python-2 `/` integer semantics and is correct on Python 3).
        tc_cmd = "tc qdisc add dev %s " % (port)
        cmd = "root handle 1: htb default 10 "
        cmd += " direct_qlen %d " % (limit // avg_pkt_size)
        log.debug(tc_cmd + cmd)
        dc_utils.exec_process(tc_cmd + cmd)
        tc_cmd = "tc class add dev %s " % (port)
        cmd = "parent 1: classid 1:10 htb rate %dbit burst %d" % (
            self.topo.max_bps, self.topo.max_bps)
        log.debug(tc_cmd + cmd)
        dc_utils.exec_process(tc_cmd + cmd)
        if self.tcp_policy == "dctcp":
            marking_threshold = calc_ecn(self.topo.max_bps, avg_pkt_size)
            # Apply aggressive RED to mark excess packets in the queue
            max_q = limit // 4
            min_q = int(marking_threshold)
            tc_cmd = "tc qdisc add dev %s " % (port)
            cmd = "parent 1:10 handle 20:1 red "
            cmd += "limit %d " % (limit)
            cmd += "bandwidth %dbit " % self.topo.max_bps
            cmd += "avpkt %d " % avg_pkt_size
            cmd += "min %d " % min_q
            cmd += "max %d " % max_q
            # Ballpark burst hard limit...
            burst = (min_q + min_q + max_q) // (3 * avg_pkt_size)
            cmd += "burst %d " % burst
            cmd += "probability 0.1"
            cmd += " ecn "
            log.debug(tc_cmd + cmd)
            dc_utils.exec_process(tc_cmd + cmd)
        else:
            # Plain byte-limited FIFO for non-dctcp policies.
            tc_cmd = "tc qdisc add dev %s " % (port)
            cmd = "parent 1:10 handle 20:1 bfifo "
            cmd += " limit %d" % limit
            dc_utils.exec_process(tc_cmd + cmd)
        # Keep the device transmit queue consistent with the qdisc limit.
        dc_utils.exec_process("ip link set %s txqueuelen %d" %
                              (port, limit // avg_pkt_size))
        dc_utils.exec_process("ip link set %s mtu 1500" % port)

    def _connect_controller(self, net):
        # One controller link per host; record which switch interface maps
        # to which controller interface in host_ctrl_map.
        controller = RemoteController(self.topo.switch_id + "_c")
        net.addController(controller)
        for i, host in enumerate(self.topo.host_list):
            # Configure host
            net.addLink(controller, host)
            # Configure controller
            ctrl_iface = "%s_c-eth%d" % (self.topo.switch_id, i)
            for index, switch in self.topo.ports[host].items():
                switch_iface = switch[0] + "-eth" + str(switch[1])
                self.host_ctrl_map[switch_iface] = ctrl_iface

    def _config_links(self, net):
        # Shape every non-loopback port of every switch.
        for switch in net.switches:
            for port in switch.intfList():
                if port.name != "lo":
                    self._apply_qdisc(port)

    def _config_hosts(self, net):
        # Baseline TCP tuning, then the per-policy congestion control.
        for host in net.hosts:
            dc_utils.exec_process("sysctl -w net.ipv4.tcp_window_scaling=1",
                                  host)
            dc_utils.exec_process("sysctl -w net.ipv4.tcp_timestamps=1", host)
            dc_utils.exec_process("sysctl -w net.ipv4.tcp_sack=1", host)
            dc_utils.exec_process("sysctl -w net.ipv4.tcp_syn_retries=10",
                                  host)
            if self.tcp_policy == "dctcp":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=dctcp", host)
                dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1", host)
                dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn_fallback=0",
                                      host)
            elif self.tcp_policy == "tcp_nv":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=nv", host)
            elif self.tcp_policy == "pcc":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=pcc", host)

    def _config_network(self, net):
        self.topo._config_topo()
        self._config_links(net)
        self._config_hosts(net)
        self._connect_controller(net)

    def get_net(self):
        return self.net

    def get_topo(self):
        return self.topo

    def get_sw_ports(self):
        # FIX: the original compared `intf is not "lo"` — identity against a
        # string literal, which only worked through CPython interning.
        # Compare by value instead.
        switches = self.net.switches
        sw_intfs = []
        for switch in switches:
            for intf in switch.intfNames():
                if intf != "lo":
                    sw_intfs.append(intf)
        return sw_intfs

    def get_host_ports(self):
        return self.host_ctrl_map.keys()

    def get_num_sw_ports(self):
        return self.topo.get_num_sw_ports()

    def get_num_hosts(self):
        return self.topo.get_num_hosts()

    def start_network(self):
        # Start Mininet
        self.net = Mininet(topo=self.topo, controller=None, autoSetMacs=True)
        self.net.start()
        self._config_network(self.net)
        self.net_stopped = False

    def stop_network(self):
        # Idempotent teardown: only runs once.
        if not self.net_stopped:
            self.net_stopped = True
            log.info("Removing interfaces and restoring all network state.")
            if self.tcp_policy == "dctcp":
                dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=0")
            # reset the active host congestion control to the previous value
            cmd = "sysctl -w net.ipv4.tcp_congestion_control=%s" % self.prev_cc
            dc_utils.exec_process(cmd)
            log.info("Deleting the virtual network")
            self.net.stop()
            log.info("Successfully deleted the virtual network")
def run(depth, number_of_TX):
    """Run a NXT blockchain experiment on a Mininet tree topology.

    Builds a tree of depth `depth` (2**depth NXT hosts), starts forging on
    every node, injects `number_of_TX` transactions in bursts while taking a
    link down and back up, then collects TPS and traffic statistics.
    Returns [mean_TPS, total_traffic].
    """
    setLogLevel('info')
    os.system('sudo rm -r EVAL-NXT-h*')
    number_of_hosts = 2**depth
    NXT_config(number_of_hosts)
    # you can change passphrases
    passphrases = {
        "h{}".format(i): (i - 1) % 10 for i in range(1, number_of_hosts + 1)
    }
    unique = time.strftime("%d_%m_%Y_%H:%M", time.localtime())
    script_name = "tree_{}_depth_{}_transactions_with_link_up_and_down_NXT".format(
        str(depth), str(number_of_TX))
    print("{}: started at {}".format(script_name, unique))
    hosts = ["h{}".format(i) for i in range(1, number_of_hosts + 1)]
    topo = TreeTopo(depth)
    net = Mininet(topo, autoSetMacs=True)
    net.start()
    net_hosts = ["h{}".format(i) for i in range(1, len(net.hosts) + 1)]
    configNetParams(net, net_hosts)
    # I run only one sniffer otherwise it doesn't work well with many
    # tcpdumps open; and also because the traffic is the same on all
    # interfaces.
    net.nameToNode['h1'].cmd(
        "tcpdump -s0 -i h1-eth0 -U -w Captures/traffic_h1_{}_{}.pcap &".format(
            script_name, unique))
    # the sleeps are to make tcpdump log well before traffic generation and
    # before stop (stopping the net kills the processes that are writing to
    # log file)
    time.sleep(2)
    for host in hosts:
        net.nameToNode[host].cmd("cd EVAL-NXT-{}".format(host))
        net.nameToNode[host].cmd(
            "nohup timeout 300 ./run.sh > ../Nodes_Logs/log_{}_{}_{} &".format(
                host, script_name, unique))
    # wait for initialization
    print('Waiting for initialization of NXT nodes')
    time.sleep(20)
    print('Initialization done')
    net.nameToNode['h1'].cmd(
        "../Util/monitor_processes.sh ../Proc_Logs/processes_log_{}_{} &".
        format(script_name, unique))
    # other commands here
    for host in hosts:
        net.nameToNode[host].cmd(
            "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=startForging"
            .format(passphrases[host], hostToIp(host)))
    # leave the nodes some time forging and send transactions at regular
    # intervals (a burst every 20 seconds for 10 times)
    # Start observing clients
    total_time = 200
    observers = []
    for host in hosts:
        # FIX: the observer host number must use the full numeric suffix —
        # host[1] only kept the first digit ("h12" -> 1) and broke for
        # depth >= 4; host[1:] takes the whole number.
        observer = observingClient(
            total_time, "h{}".format(int(host[1:]) + number_of_hosts + 1),
            net, host)
        observers.append(observer)
        observer.start()
    # Generate transactions
    tx_burst = 50
    n_bursts = number_of_TX // tx_burst
    amounts = [i * 10**8 for i in [50, 100, 200]]
    fees = [i * 10**8 for i in [1, 5, 10]]
    # Link down for the second quarter of the total time
    start = time.time()
    elapsed = 0
    stop_it = False
    for i in range(n_bursts):
        # round nodes
        print("Transaction burst number {}".format(i + 1))
        for j in range(tx_burst):
            host_num = j % len(hosts) + 1
            host_IP = hostToIp('h{}'.format(host_num))
            try:
                net.nameToNode['h{}'.format(number_of_hosts + 1)].cmd(
                    "curl -m 1 --data 'recipient=NXT-2543-6FUN-HS5W-BNVW6&secretPhrase={}&deadline=1440&phased=false&phasingHashedSecretAlgorithm=2&feeNQT={}&amountNQT={}' http://{}:6876/nxt?requestType=sendMoney"
                    .format(j % 10, fees[j % 3], amounts[j % 3], host_IP))
            except Exception:
                print("One transaction not sent to h{} due to some exception".
                      format(host_num))
            elapsed = time.time() - start
            if elapsed >= total_time:
                stop_it = True
                break
        if stop_it:
            break
        # Second quarter of the run: link down; after the half: back up.
        if (total_time // 4) <= elapsed < (total_time // 2):
            try:
                configLinkStatus(net, number_of_hosts, "down")
            except Exception:
                print("Config link down failed")
        elif elapsed >= (total_time // 2):
            try:
                configLinkStatus(net, number_of_hosts, "up")
            except Exception:
                print("Config link up failed")
        time.sleep(total_time // n_bursts)
    # Best-effort cleanup: stop forging on every node.
    for host in hosts:
        try:
            net.nameToNode[host].cmd(
                "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=stopForging"
                .format(passphrases[host], hostToIp(host)))
        except Exception:
            pass
    ############
    time.sleep(5)
    try:
        resetNetParams(net, net_hosts)
    except Exception:
        pass
    try:
        net.stop()
    except Exception:
        pass
    # Post-processing: process logs, TPS per observer, traffic volume.
    proc_log_name = 'processes_log_{}_{}'.format(script_name, unique)
    log_analysis("Proc_Logs/{}".format(proc_log_name), ["java"])
    make_plots('Analysis/analysis_{}.csv'.format(proc_log_name))
    obs_dict = {obs.observedHost: obs.TPS() for obs in observers}
    export_TPS_to_CSV(
        'Analysis/TPS_measures_{}_{}.csv'.format(script_name, unique),
        obs_dict)
    mean_TPS = sum(observer.TPS() for observer in observers) / len(observers)
    tshark_output_to_CSV(
        'Analysis/traffic_volume_{}_{}.csv'.format(script_name, unique),
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        'websocket')
    total_traffic = tshark_total_traffic(
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        ['tcp', 'udp'])
    return [mean_TPS, total_traffic]
# --- Static IPv4 routing lab: h1 <-> r1 <-> r2 <-> r3 <-> h2 ---
# Each host sends all traffic to the router interface on its own /24.
h1.cmd('route add default gw 10.0.1.1')
h2.cmd('route add default gw 10.0.4.1')
# NIC offloading and IPv6 traffic would bypass/pollute the experiment.
for h in (h1, h2):
    h.cmd('./scripts/disable_offloading.sh')
    h.cmd('./scripts/disable_ipv6.sh')
# One /24 per hop: 10.0.1.0 (h1-r1), 10.0.2.0 (r1-r2),
# 10.0.3.0 (r2-r3), 10.0.4.0 (r3-h2).
r1.cmd('ifconfig r1-eth0 10.0.1.1/24')
r1.cmd('ifconfig r1-eth1 10.0.2.1/24')
r2.cmd('ifconfig r2-eth0 10.0.2.2/24')
r2.cmd('ifconfig r2-eth1 10.0.3.1/24')
r3.cmd('ifconfig r3-eth0 10.0.3.2/24')
r3.cmd('ifconfig r3-eth1 10.0.4.1/24')
# Static routes towards the subnets each router cannot reach directly.
r1.cmd('route add -net 10.0.3.0/24 gw 10.0.2.2')
r1.cmd('route add -net 10.0.4.0/24 gw 10.0.2.2')
r2.cmd('route add -net 10.0.1.0/24 gw 10.0.2.1')
r2.cmd('route add -net 10.0.4.0/24 gw 10.0.3.2')
r3.cmd('route add -net 10.0.1.0/24 gw 10.0.3.1')
r3.cmd('route add -net 10.0.2.0/24 gw 10.0.3.1')
# Kernel networking is disabled on the routers: a userspace ./router
# binary does the forwarding instead.
for r in (r1, r2, r3):
    r.cmd('./scripts/disable_arp.sh')
    r.cmd('./scripts/disable_icmp.sh')
    r.cmd('./scripts/disable_ip_forward.sh')
    r.cmd('./router &')
net.start()
CLI(net)
net.stop()
def simpleTest():
    """Bring up the ring topology, launch the cache-algorithm server on each
    participating host, and wait until enough result files appear.

    Replaces ~70 copy-pasted getNodeByName/.cmd lines with one loop over the
    exact set of host indices the original used (0-49 plus 51, 52, 56, 60,
    64 — 50, 53-55, 57-59 and 61-63 were intentionally absent).
    """
    topo = RingTopo()
    controller_ip = '127.0.0.1'
    net = Mininet(topo=topo,
                  controller=lambda a: RemoteController(a, ip=controller_ip,
                                                        port=6633),
                  link=TCLink)
    net.start()
    # Pre-populate static ARP entries between every pair of hosts.
    for host in net.hosts:
        for h in net.hosts:
            host.cmd("arp -s %s %s" % (h.IP(), h.MAC()))
    print("Dumping host connections")
    dumpNodeConnections(net.hosts)
    print("Testing network connectivity")
    #CLI(net)
    sleep(5)
    net.pingAll()
    net.pingAll()
    # Host indices that run a cache server (matches the original hard-coded
    # list: h0-h49 plus a sparse tail).
    cache_host_ids = list(range(50)) + [51, 52, 56, 60, 64]
    base = '/home/hpcc/workspace/telco_cdn_mininet/mininet/source/cache_algorithm'
    for idx in cache_host_ids:
        host = net.getNodeByName('h{}'.format(idx))
        host.cmd(
            'python mininet/source/cache_algorithm/{script} -i {i} '
            '>> {base}/log/log_console_{i}.txt &'.format(
                script=cache_script[cache_type], i=idx, base=base))
    DIR = base + '/result'
    if not os.path.exists(DIR):
        os.makedirs(DIR)
    # Poll once per second until (almost) all servers wrote their result
    # file; 54 is the threshold the original used.
    timer = 0
    while True:
        numfile = len([name for name in os.listdir(DIR)
                       if os.path.isfile(os.path.join(DIR, name))])
        if numfile >= 54:
            print("All servers are finished: {}".format(numfile))
            break
        print("Number of servers are finished: {} after {} seconds".format(
            numfile, timer))
        timer += 1
        sleep(1)
    net.stop()
def createDatacenter():
    """Build a small 2-core / 4-aggr / 8-edge / 24-host datacenter topology,
    install two expedited-forwarding queues via the HAQoS REST API, and drop
    into the Mininet CLI."""
    net = Mininet(controller=RemoteController)

    info('*** Adding controller\n')
    net.addController('c0', controller=RemoteController, ip="127.0.0.1",
                      port=6633)

    # 24 hosts: h1..h24.
    hosts = [net.addHost('h%s' % (n + 1)) for n in range(24)]

    # Switch numbering continues after the hosts: s25-s26 core,
    # s27-s30 aggregation, s31-s38 edge.
    i = 24
    core = []
    for _ in range(2):
        core.append(net.addSwitch('s%s' % (i + 1)))
        i += 1
    aggr = []
    for _ in range(4):
        aggr.append(net.addSwitch('s%s' % (i + 1)))
        i += 1
    edge = []
    for _ in range(8):
        edge.append(net.addSwitch('s%s' % (i + 1)))
        i += 1

    # Full mesh aggregation <-> core.
    # (Shaping was disabled in the original: pass cls=TCLink, bw=... to
    # re-enable.)
    for a in aggr:
        for c in core:
            net.addLink(a, c)  # , cls=TCLink, bw=100
    # Two edge switches per aggregation switch.  FIX: use floor division —
    # on Python 3 `i / 2` is a float and cannot index a list.
    for k, e in enumerate(edge):
        net.addLink(e, aggr[k // 2])  # , cls=TCLink, bw=50
    # Three hosts per edge switch.
    for k, h in enumerate(hosts):
        net.addLink(h, edge[k // 3])  # , cls=TCLink, bw=20

    info('*** Starting network\n')
    net.start()
    time.sleep(5)
    info('*** adding q h1-h2\n')
    cmd1 = 'curl -i http://221.199.216.240:8080/wm/haqos/createEfQ/00:00:00:00:00:00:00:1f/00:00:00:00:00:00:00:26/60000000/5001/-1/json -X PUT'
    result1 = os.popen(cmd1)
    time.sleep(3)
    info('*** adding q h2-h1\n')
    cmd2 = 'curl -i http://221.199.216.240:8080/wm/haqos/createEfQ/00:00:00:00:00:00:00:26/00:00:00:00:00:00:00:1f/60000000/-1/5001/json -X PUT'
    result2 = os.popen(cmd2)
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
def main():
    """Parse CLI arguments, build the FPTopo from a YAML config, run the
    configured replays and host programs in Mininet, then tear down.

    Re-raises any exception after stopping the network so failures are
    visible to the caller.
    """
    args = parser.parse_args()
    # Fall back to the simple_switch binary inside BMV2_REPO when no
    # explicit executable was given.
    if args.bmv2_exe is None:
        bmv2_exe = os.path.join(os.environ['BMV2_REPO'], 'targets',
                                'booster_switch', 'simple_switch')
    else:
        bmv2_exe = args.bmv2_exe
    with open(args.config) as f:
        # FIX: use safe_load — the config is plain data, and yaml.load
        # without an explicit Loader is deprecated and can construct
        # arbitrary Python objects from untrusted input.
        cfg = yaml.safe_load(f)
    topo = FPTopo(cfg['hosts'], cfg['switches'], bmv2_exe, args.log,
                  args.verbose, args.config, args.bw, args.qlen)
    print("Starting mininet")
    net = Mininet(topo=topo, host=P4Host, switch=P4Switch, controller=None,
                  link=TCLink)
    net.start()
    net.staticArp()
    try:
        topo.init(net)
        if args.pcap_dump:
            topo.start_tcp_dumps(net, args.pcap_dump, args.pcap_to,
                                 args.pcap_from)
        sleep(.1)
        topo.do_switch_replay(net)
        sleep(.1)
        topo.do_commands(net)
        sleep(1)
        # Pre-replays: "Host-Switch:File:bg[:Speed]".
        for arg in args.pre_replay:
            replay_args = arg.split(":")
            replay_arg1 = replay_args[0].split('-')
            if len(replay_args) not in (3, 4) or len(replay_arg1) != 2:
                raise Exception(
                    "args.pre_replay must have form Host-Switch:File:bg[:Speed]"
                )
            if len(replay_args) == 4:
                speed = int(replay_args[3])
            else:
                speed = None
            bg = int(replay_args[2])
            topo.do_pre_replay(net, replay_arg1[0], replay_arg1[1],
                               replay_args[1], bg, speed)
        topo.run_host_programs(net, args.host_prog, args.fg_host_prog,
                               args.showExitStatus)
        sleep(1)
        # Main replays: "Host-Switch:File".
        for to_replay in args.replay:
            replay_args = to_replay.split(":")
            replay_arg1 = replay_args[0].split('-')
            if len(replay_args) != 2 or len(replay_arg1) != 2:
                raise Exception("args.replay must have form Host-Switch:File")
            topo.do_host_replay(net, replay_arg1[0], replay_arg1[1],
                                replay_args[1])
            sleep(.1)
        sleep(args.time)
        if args.cli:
            CLI(net)
        if args.pcap_dump:
            # Pause for a second before stopping tcpdump to allow process
            # to complete
            sleep(1)
            topo.stop_tcp_dumps(net)
        print("Stopping mininet")
        net.stop()
    except Exception as e:
        # Always tear the network down, then surface the original failure.
        print("Encountered exception running mininet:" + str(e))
        net.stop()
        raise
def main():
    """Chain two P4 docker switches between h1 and h2 (172.16.10.0/24),
    configure both switches, and drop into the Mininet CLI."""
    net = Mininet(controller=None)

    # add hosts
    h1 = net.addHost("h1", ip="172.16.10.1/24")
    h2 = net.addHost("h2", ip="172.16.10.2/24")

    # add switch 1
    sw1 = net.addSwitch("sw1", target_name="p4dockerswitch",
                        cls=P4DockerSwitch, sai_port=25000, pcap_dump=True)

    # add switch 2
    sw2 = net.addSwitch("sw2", target_name="p4dockerswitch",
                        cls=P4DockerSwitch, sai_port=25001, pcap_dump=True)

    # add links — Mininet <= 2.2.0 does not support the `fast` parameter.
    if StrictVersion(VERSION) <= StrictVersion("2.2.0"):
        net.addLink(sw1, h1, port1=1)
        net.addLink(sw1, sw2, port1=2, port2=2)
        net.addLink(sw2, h2, port1=1)
    else:
        net.addLink(sw1, h1, port1=1, fast=False)
        net.addLink(sw1, sw2, port1=2, port2=2, fast=False)
        net.addLink(sw2, h2, port1=1, fast=False)

    net.start()
    # print() works on both Python 2 and 3; also fixed the "intialize" typo.
    print("Waiting 10 seconds for switches to initialize...")
    time.sleep(10)

    cfg_switch1()
    cfg_switch2()

    CLI(net)
    net.stop()