class FaucetUntaggedTest(FaucetTest):
    """Four untagged hosts on VLAN 100 must all be able to reach each other."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    1:
        native_vlan: 100
        description: "b1"
    2:
        native_vlan: 100
        description: "b2"
    3:
        native_vlan: 100
        description: "b3"
    4:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
"""

    def setUp(self):
        """Build a 4-untagged-host topology and wait for the switch to program."""
        super(FaucetUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_untagged=4)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        # Datapath is ready once the controller rule is installed.
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_untagged(self):
        """All hosts on the shared untagged VLAN ping each other (0% loss)."""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(0, self.net.pingAll())
class FaucetTaggedTest(FaucetTest):
    """Four tagged hosts on VLAN 100 must all be able to reach each other."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        tagged_vlans: [100]
        description: "b3"
    %(port_4)d:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
"""

    def setUp(self):
        """Interpolate real port numbers, then build a 4-tagged-host net."""
        # NOTE(review): if the base class setUp also applies PORT_MAP this
        # interpolation happens twice; harmless once the %(port_N)d
        # placeholders are gone, but confirm against FaucetTest.setUp.
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=4)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_tagged(self):
        """All tagged hosts can ping each other (0% loss)."""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(0, self.net.pingAll())
class FaucetTaggedTest(FaucetTest):
    """Four tagged hosts on VLAN 100 must all be able to reach each other."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    1:
        tagged_vlans: [100]
        description: "b1"
    2:
        tagged_vlans: [100]
        description: "b2"
    3:
        tagged_vlans: [100]
        description: "b3"
    4:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
"""

    def setUp(self):
        """Build a 4-tagged-host topology and wait for switch connection."""
        super(FaucetTaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=4)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()

    def test_tagged(self):
        """All tagged hosts can ping each other (0% loss)."""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(0, self.net.pingAll())
def perfTest():
    """Build the fat tree, run mirrored iperf pairs, start background pings,
    then drop into the CLI and clean up."""
    topo = fatTree(n=4)
    net = Mininet(topo=topo, controller=None)
    net.addController('ctr0', controller=RemoteController,
                      ip='127.0.0.1', port=6633)
    net.build()
    net.waitConnected()
    net.pingAll()

    host = {}
    maxHost = 16
    for y in range(0, maxHost):
        hostName = 'h' + str(y)
        host[y] = net.get(hostName)

    # Pair each host x with its mirror host (maxHost-1-x) and run iperf in
    # both directions concurrently.
    for x in range(0, (maxHost / 2)):
        src = host[x]
        dst = host[(maxHost - 1) - x]
        # BUG FIX: start_new_thread takes (function, args_tuple).  The
        # original called iperf_thread() inline (blocking) and then passed
        # its result to start_new_thread without an args tuple (TypeError).
        thread.start_new_thread(iperf_thread, (net, dst, src))
        thread.start_new_thread(iperf_thread, (net, src, dst))

    out = {}
    for h in range(0, maxHost):
        out[host[h]] = '/tmp/%s.out' % host[h].name
        host[h].cmd('echo >', out[host[h]])

    packetSize = [50, 100, 150, 200, 300, 450, 600, 750, 900, 1200]
    # NOTE(review): the original pinged host[h + 1] with h left at
    # maxHost - 1 from the loop above, which raised KeyError (host 16 does
    # not exist).  Wrap around to host[0] instead -- confirm intended target.
    for i in range(10):
        host[h].cmdPrint('ping', host[(h + 1) % maxHost].IP(),
                         '-s', packetSize[i],
                         '>', out[host[h]], '&')

    CLI(net)
    net.stop()
    # Clear any stale Mininet state left behind.
    subprocess.call(['mn', '-c'])
def test_topology(topo: Topo, net: Mininet):
    """Smoke-test connectivity of *net*, then optionally run iperf pairs.

    ``conf['test']['iperf']`` semantics (as used here): -1 skips iperf,
    0 runs a single end-to-end pair, any other value runs all ordered pairs.
    """
    print("Dumping host connections")
    dumpNodeConnections(net.hosts)
    print("Waiting switch connections")
    net.waitConnected()
    # First sweep populates the switches' MAC tables; the second measures
    # connectivity after learning.
    print("Testing network connectivity - (i: switches are learning)")
    net.pingAll()
    print("Testing network connectivity - (ii: after learning)")
    net.pingAll()
    print("Get all hosts")
    print(topo.hosts(sort=True))

    if conf['test']['iperf'] == -1:
        return
    hosts = [net.get(i) for i in topo.hosts(sort=True)]
    if conf['test']['iperf'] == 0:
        net.iperf((hosts[0], hosts[-1]))
    else:
        # A plain loop replaces the original list comprehension that was
        # built only for its side effects.
        for src in hosts:
            for dst in hosts:
                if src != dst:
                    net.iperf((src, dst))
class FaucetTaggedAndUntaggedVlanTest(FaucetUntaggedTest):
    """One tagged and three untagged hosts share VLAN 100 with unicast
    flooding disabled."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    1:
        tagged_vlans: [100]
        description: "b1"
    2:
        native_vlan: 100
        description: "b2"
    3:
        native_vlan: 100
        description: "b3"
    4:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "mixed"
        unicast_flood: False
"""

    def setUp(self):
        # Deliberately invokes FaucetTest.setUp, skipping
        # FaucetUntaggedTest.setUp (which would build its own
        # 4-untagged-host network before this one).
        super(FaucetUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=1, n_untagged=3)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_untagged(self):
        # No assertion: with unicast_flood disabled some pings are
        # presumably expected to fail, so this only exercises the
        # datapath.  NOTE(review): confirm the missing assert is intended.
        self.net.pingAll()
def OutcastTest():
    """TCP outcast experiment: every host iperfs to the 'outer' server while
    one edge-switch queue is monitored.

    Relies on module-level state: ``args`` (size/duration/congestion),
    ``HOST_PER_NODE``, ``NUM_FRABRIC_SWITCH``, ``path`` and the helpers
    ``monitor_qlen`` / ``progress``.
    """
    num_TOR = int(args.size)
    topo = FacebookFatTree(num_TOR)
    # Total hosts in the fat tree (per this formula; hosts are named host_N).
    host_number = num_TOR * num_TOR * HOST_PER_NODE
    print(host_number)
    net = Mininet(topo=topo, host=CPULimitedHost, switch=LinuxBridge,
                  link=TCLink, autoStaticArp=True)
    print("end create net")
    net.start()
    net.waitConnected()
    h1 = net.getNodeByName('host_0')
    # Warm-up ping before starting the measurement.
    print(h1.cmd('ping -c 2 10.0.0.2'))
    clients = [
        net.getNodeByName('host_%d' % (i)) for i in xrange(1, host_number)
    ]
    sleep(1)
    monitors = []
    # add more switch interface for fat-tree topo
    # may consider using mtr to test ping, -s PACKETSIZE, --psize PACKETSIZE
    # Sample the queue length of the edge port facing the server side.
    monitor = multiprocessing.Process(
        target=monitor_qlen,
        args=('edge_0-eth%d' % (NUM_FRABRIC_SWITCH + num_TOR + 1), 0.1,
              '%s/qlen_edge0-eth%d.txt' % (
                  path, NUM_FRABRIC_SWITCH + num_TOR + 1)))
    monitor.start()
    monitors.append(monitor)
    # Server runs slightly longer than the clients so it outlives them.
    server = net.getNodeByName('outer')
    server.sendCmd('iperf -s -t %d -i 2 > %s/iperf_outer.txt' % (
        int(args.duration) + 5, path))
    # Every host (except host_0) hammers the server concurrently.
    for i in xrange(1, host_number):
        node_name = 'host_%d' % (i)
        cmd = 'iperf -c %s -t %d -i 2 -O 2 -Z %s' % (
            server.IP(), int(args.duration), args.congestion)
        # cmd = 'iperf -c %s -t %d -i 2 -O 2 -Z %s > %s/iperf_%s.txt' % (server.IP(), int(args.duration), args.congestion, path, node_name)
        h = net.getNodeByName(node_name)
        h.sendCmd(cmd)
    progress(int(args.duration))
    for monitor in monitors:
        monitor.terminate()
    net.getNodeByName('outer').pexec(
        "/sbin/ifconfig > %s/ifconfig.txt" % path, shell=True)
    # Drain each host's backgrounded command before shutting down.
    for i in xrange(0, host_number):
        net.getNodeByName('host_%d' % (i)).waitOutput()
        print('host_%d finished' % (i))
    net.getNodeByName('outer').waitOutput()
    print('outer finished')
    net.stop()
def test( serverCount ):
    "Bring up a 3-host single-switch net under an ONOS cluster and open the CLI."
    setLogLevel( 'info' )
    cluster = ONOSCluster( 'c0', serverCount )
    network = Mininet( topo=SingleSwitchTopo( 3 ),
                       controller=[ cluster ],
                       switch=ONOSOVSSwitch )
    network.start()
    network.waitConnected()
    CLI( network )
    network.stop()
def test(serverCount):
    "Spin up a single-switch ONOS cluster network, wait, then hand over a CLI."
    setLogLevel('info')
    topo = SingleSwitchTopo(3)
    controllers = [ONOSCluster('c0', serverCount)]
    net = Mininet(topo=topo, controller=controllers, switch=ONOSOVSSwitch)
    net.start()
    net.waitConnected()
    CLI(net)
    net.stop()
def main():
    """Fat-tree iperf experiment: host_0 serves, all other hosts send to it
    while a rack-switch queue is monitored.

    Relies on module-level helpers ``monitor_qlen`` and ``progress``; all
    results go under /home/ubuntu/test/output-cubic.
    """
    topo = FacebookFatTree(4)
    host_number = 32
    # topo = StarTopo(host_number)
    # print("end create topo")
    net = Mininet(topo=topo, host=CPULimitedHost, switch=LinuxBridge,
                  link=TCLink, autoStaticArp=True)
    print("end create net")
    net.start()
    net.waitConnected()
    h1 = net.getNodeByName('host_0')
    # Warm-up ping to a neighbour before starting the measurement.
    print(h1.cmd('ping -c 2 10.0.0.2'))
    # host_0 doubles as the iperf server.
    h1.sendCmd(
        'iperf -s -t 20 -i 2 > /home/ubuntu/test/output-cubic/iperf_host_0.txt'
    )
    sleep(1)
    # NOTE(review): `clients` is built but never used below.
    clients = [
        net.getNodeByName('host_%d' % (i)) for i in xrange(1, host_number)
    ]
    # waitListening(clients[0], h1, 5001)
    monitors = []
    # add more switch interface for fat-tree topo
    # may consider using mtr to test ping, -s PACKETSIZE, --psize PACKETSIZE
    monitor = multiprocessing.Process(
        target=monitor_qlen,
        args=('rack_0-eth5', 0.1,
              '%s/qlen_rack0-eth5.txt' % "/home/ubuntu/test/output-cubic"))
    monitor.start()
    monitors.append(monitor)
    # Every other host runs a 20s cubic iperf client against host_0.
    for i in xrange(1, host_number):
        node_name = 'host_%d' % (i)
        cmd = 'iperf -c 10.0.0.1 -t %d -l 1m -b 10m -Z cubic > %s/iperf_%s.txt' % (
            20, "/home/ubuntu/test/output-cubic", node_name)
        h = net.getNodeByName(node_name)
        h.sendCmd(cmd)
    progress(20)
    for monitor in monitors:
        monitor.terminate()
    net.getNodeByName('host_0').pexec(
        "/sbin/ifconfig > %s/ifconfig.txt" % "/home/ubuntu/test/output-cubic",
        shell=True)
    # Drain each host's backgrounded command before shutting down.
    for i in xrange(0, host_number):
        net.getNodeByName('host_%d' % (i)).waitOutput()
        print('host_%d finished' % (i))
    net.stop()
def restart_net_switch(sTopo):
    """Tear down the running global network and relaunch it on *sTopo*,
    then block until every switch has connected."""
    global net, topo
    print("Saved the topology")
    print("Stopping the network")
    net.stop()
    topo = sTopo
    net = None
    print("Stopped the network")
    print("Starting new network")
    net = Mininet(topo, controller=OVSController, autoSetMacs=True)
    net.start()
    print("Done restarting")
    print("Waiting for all switches to connect...")
    net.waitConnected()
    print("Done")
def main(exec_tests=False, tests=[]): setLogLevel('info') # Instantiate IDS Test Framework ids_test = IDSTestFramework() net = Mininet(topo=ids_test, controller=RemoteController, autoStaticArp=False) net.start() int_routers = [ net.get(router.name) for key, router in ids_test.int_routers.iteritems() ] ext_routers = [ net.get(router.name) for key, router in ids_test.ext_routers.iteritems() ] ids_test.int_topo_class.configure_routers(int_routers) ids_test.ext_topo_class.configure_routers(ext_routers, ids_test.int_routers) int_hosts = [net.get(host) for host in ids_test.int_hosts] ext_hosts = [net.get(host) for host in ids_test.ext_hosts] int_switches = [ net.get(switch) for host, switch in ids_test.int_switches.iteritems() ] ext_switches = [ net.get(switch) for host, switch in ids_test.ext_switches.iteritems() ] targets_arr = ids_test.log_target_hosts(net) time.sleep(5) if exec_tests: net.waitConnected() print 'Executing test cases' ids_test.exec_test_cases(tests, int_hosts, ext_hosts, int_switches, ext_switches, int_routers, ext_routers, targets_arr) CLI(net) net.stop()
def simpleTest():
    "Create and test a simple network"
    net = Mininet(topo=MeshTopo(n=10), controller=RemoteController,
                  host=CPULimitedHost, link=TCLink, waitConnected=True)
    net.start()
    print("Dumping host connections")
    dumpNodeConnections(net.hosts)
    switches = net.switches
    # for i in range(len(switches)):
    #     switches[i].cmd('ovs-vsctl set bridge s%s stp-enable=true' % (i+1))
    net.waitConnected()
    print("Testing network connectivity")
    net.pingAll()
    hosts = net.hosts
    h1, h6, h10 = hosts[0], hosts[5], hosts[9]
    print("Pinging h6 from h1")
    print(h1.cmd('ping -c1 %s' % h6.IP()))
    print("Running iperf between h1 and h6")
    print(net.iperf([h1, h6]))
    print("Running iperf between h1 and h10")
    print(net.iperf([h1, h10]))
    print("Running ifconfig on h1")
    print(h1.cmd('ifconfig'))
    print("Running route on h1")
    print(h1.cmd('route'))
    print("Running traceroute from h1 to h10")
    print(h1.cmd('traceroute %s' % h10.IP()))
    # print("Running nslookup from h1")
    # print(hosts[0].cmd('nslookup www.google.com'))
    net.stop()
class MininetRunner:
    """Build a Mininet network from a topology file, run per-host setup
    commands from the file, and hand control to the CLI."""

    def __init__(self, topo_file):
        # controller=None here; a P4Controller is attached explicitly below
        # so it can be given the switch list.
        topo = NetworkTopo(topo_file)
        self.net = Mininet(topo=topo, controller=None)
        self.net.addController(controller=P4Controller,
                               switches=self.net.switches)
        self.start()
        time.sleep(1)
        """ Execute any commands provided in the topology.json file on each Mininet host """
        for host_name, host_info in topo.topo['hosts'].items():
            h = self.net.get(host_name)
            if "commands" in host_info:
                for cmd in host_info["commands"]:
                    h.cmd(cmd)
        self.net.staticArp()
        CLI(self.net)
        # stop right after the CLI is exited
        self.net.stop()

    def start(self):
        "Start controller and switches."
        # Mirrors Mininet.start(), but starts switches BEFORE controllers.
        if not self.net.built:
            self.net.build()
        info('*** Starting %s switches\n' % len(self.net.switches))
        for switch in self.net.switches:
            info(switch.name + ' ')
            switch.start(self.net.controllers)
        started = {}
        # Group switches by concrete class so classes supporting batch
        # startup can be started in a single call.
        for swclass, switches in groupby(
                sorted(self.net.switches, key=lambda s: str(type(s))), type):
            switches = tuple(switches)
            if hasattr(swclass, 'batchStartup'):
                success = swclass.batchStartup(switches)
                started.update({s: s for s in success})
        info('\n')
        info('*** Starting controller\n')
        for controller in self.net.controllers:
            info(controller.name + ' ')
            controller.start()
        info('\n')
        # Honour Mininet's waitConnected flag if the caller set it.
        if self.net.waitConn:
            self.net.waitConnected()
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Two tagged hosts on VLAN 100 and two untagged hosts on VLAN 101:
    intra-VLAN traffic must pass, inter-VLAN traffic must be dropped."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    1:
        tagged_vlans: [100]
        description: "b1"
    2:
        tagged_vlans: [100]
        description: "b2"
    3:
        native_vlan: 101
        description: "b3"
    4:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Build a 2-tagged / 2-untagged topology."""
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()

    def test_seperate_untagged_tagged(self):
        """Intra-VLAN pings succeed; inter-VLAN pings are 100% lost."""
        # BUG FIX: hosts[0:1] / hosts[2:3] sliced single-element lists, so
        # the intra-VLAN pings exercised no host pair at all.
        tagged_host_pair = self.net.hosts[0:2]
        untagged_host_pair = self.net.hosts[2:4]
        # hosts within VLANs can ping each other
        self.assertEqual(0, self.net.ping(tagged_host_pair))
        self.assertEqual(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEqual(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))

    def tearDown(self):
        self.net.stop()
        super(FaucetTaggedAndUntaggedTest, self).tearDown()
        # Give Mininet time to finish shutting down.
        time.sleep(1)
def emptyNet():
    "Create an empty network and add nodes to it."
    setLogLevel("info")
    #setLogLevel("debug")

    net = Mininet(controller=None, switch=MyBridge)
    #info( '*** Adding controller\n' )
    #net.addController( 'c0' )

    info('*** Adding hosts\n')
    # One host per entry read in from the DOT file, named h1..hN.
    createdHosts = []
    for idx in range(1, len(hosts) + 1):
        createdHosts.append(net.addHost('h' + str(idx),
                                        ip='10.0.0.' + str(idx)))

    info('*** Adding switch\n')
    createdSwitches = [net.addSwitch('s' + str(idx + 1))
                       for idx in range(len(switches))]

    info('*** Creating links\n')
    for entry in links:
        left = entry[0] + entry[1]      # first two characters name one end
        right = entry[-2] + entry[-1]   # last two characters name the other
        net.addLink(left, right)

    info('*** Starting network\n')
    net.start()
    #net.staticArp()
    net.waitConnected()  # https://github.com/mininet/mininet/wiki/FAQ

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Two tagged hosts on VLAN 100 and two untagged hosts on VLAN 101:
    intra-VLAN traffic must pass, inter-VLAN traffic must be dropped."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        native_vlan: 101
        description: "b3"
    %(port_4)d:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Interpolate real ports, then build a 2-tagged / 2-untagged net."""
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_seperate_untagged_tagged(self):
        """Intra-VLAN pings succeed; inter-VLAN pings are 100% lost."""
        # BUG FIX: hosts[0:1] / hosts[2:3] sliced single-element lists, so
        # the intra-VLAN pings exercised no host pair at all.
        tagged_host_pair = self.net.hosts[0:2]
        untagged_host_pair = self.net.hosts[2:4]
        # hosts within VLANs can ping each other
        self.assertEqual(0, self.net.ping(tagged_host_pair))
        self.assertEqual(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEqual(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Two tagged hosts on VLAN 100 and two untagged hosts on VLAN 101:
    intra-VLAN traffic must pass, inter-VLAN traffic must be dropped."""

    CONFIG = CONFIG_HEADER + """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        native_vlan: 101
        description: "b3"
    %(port_4)d:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        """Interpolate real ports, then build a 2-tagged / 2-untagged net."""
        self.CONFIG = self.CONFIG % PORT_MAP
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(n_tagged=2, n_untagged=2)
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        dumpNodeConnections(self.net.hosts)
        self.net.waitConnected()
        self.wait_until_matching_flow('actions=CONTROLLER')

    def test_seperate_untagged_tagged(self):
        """Intra-VLAN pings succeed; inter-VLAN pings are 100% lost."""
        # BUG FIX: hosts[0:1] / hosts[2:3] sliced single-element lists, so
        # the intra-VLAN pings exercised no host pair at all.
        tagged_host_pair = self.net.hosts[0:2]
        untagged_host_pair = self.net.hosts[2:4]
        # hosts within VLANs can ping each other
        self.assertEqual(0, self.net.ping(tagged_host_pair))
        self.assertEqual(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEqual(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
def restart_net(sTopo):
    """Replace the running global network with one built on *sTopo*,
    re-test connectivity, and return the pingAll() loss percentage."""
    global net, topo
    print("Saved the topology")
    print("Stopping the network")
    net.stop()
    topo = sTopo
    net = None
    print("Stopped the network")
    print("Starting new network")
    net = Mininet(topo, controller=OVSController, autoSetMacs=True)
    net.start()
    print("Dumping host connections")
    dumpNodeConnections(net.hosts)
    print("Testing network connectivity")
    ping = net.pingAll()
    print("Done restarting")
    print("Waiting for all switches to connect...")
    net.waitConnected()
    print("Done")
    return ping
def startNetwork():
    """Build the tree topology from topology.in, start it with a remote
    controller, create QoS queues, and open the CLI."""
    info('** Creating the tree network\n')
    tree = TreeTopo()
    tree.readFromFile("topology.in")

    global net
    net = Mininet(topo=tree,
                  link=TCLink,
                  controller=lambda name: RemoteController(
                      name, ip='192.168.56.1'),
                  listenPort=6633,
                  autoSetMacs=True)

    info('** Starting the network\n')
    net.start()
    net.waitConnected()

    info('** Creating QoS Queues\n')
    createQosQueues(net, tree.linkInfo)  # Create QoS Queues

    info('** Running CLI\n')
    CLI(net)
class FaucetTest(unittest.TestCase): ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss' CONFIG = '' CONTROLLER_IPV4 = '10.0.0.254' CONTROLLER_IPV6 = 'fc00::1:254' OFCTL = 'ovs-ofctl -OOpenFlow13' def setUp(self): self.tmpdir = tempfile.mkdtemp() os.environ['FAUCET_CONFIG'] = os.path.join(self.tmpdir, 'faucet.yaml') os.environ['FAUCET_LOG'] = os.path.join(self.tmpdir, 'faucet.log') os.environ['FAUCET_EXCEPTION_LOG'] = os.path.join(self.tmpdir, 'faucet-exception.log') self.debug_log_path = os.path.join(self.tmpdir, 'ofchannel.log') self.CONFIG = '\n'.join(( self.get_config_header(DPID, HARDWARE), self.CONFIG % PORT_MAP, 'ofchannel_log: "%s"' % self.debug_log_path)) open(os.environ['FAUCET_CONFIG'], 'w').write(self.CONFIG) self.net = None self.topo = None def get_config_header(self, dpid, hardware): return ''' --- dp_id: 0x%s name: "faucet-1" hardware: "%s" ''' % (dpid, hardware) def attach_physical_switch(self): switch = self.net.switches[0] hosts_count = len(self.net.hosts) for i, test_host_port in enumerate(sorted(SWITCH_MAP)): port_i = i + 1 mapped_port_i = port_i + hosts_count phys_port = Intf(SWITCH_MAP[test_host_port], node=switch) switch.cmd('ifconfig %s up' % phys_port) switch.cmd('ovs-vsctl add-port %s %s' % (switch.name, phys_port.name)) for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)): port_x, port_y = port_pair switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % ( self.OFCTL, switch.name, port_x, port_y)) for _ in range(20): if (os.path.exists(self.debug_log_path) and os.path.getsize(self.debug_log_path) > 0): return time.sleep(1) print 'physical switch could not connect to controller' sys.exit(-1) def start_net(self): self.net = Mininet(self.topo, controller=FAUCET) self.net.start() if SWITCH_MAP: self.attach_physical_switch() else: self.net.waitConnected() self.wait_until_matching_flow('actions=CONTROLLER') dumpNodeConnections(self.net.hosts) def tearDown(self): if self.net is not None: self.net.stop() # 
Mininet takes a long time to actually shutdown. # TODO: detect and block when Mininet isn't done. time.sleep(5) shutil.rmtree(self.tmpdir) def add_host_ipv6_address(self, host, ip_v6): host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf())) def one_ipv4_ping(self, host, dst): ping_result = host.cmd('ping -c1 %s' % dst) self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result)) def one_ipv4_controller_ping(self, host): self.one_ipv4_ping(host, self.CONTROLLER_IPV4) def one_ipv6_ping(self, host, dst): # TODO: retry our one ping. We should not have to retry. for _ in range(2): ping_result = host.cmd('ping6 -c1 %s' % dst) if re.search(self.ONE_GOOD_PING, ping_result): return self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result)) def one_ipv6_controller_ping(self, host): self.one_ipv6_ping(host, self.CONTROLLER_IPV6) def wait_until_matching_flow(self, flow, timeout=5): # TODO: actually verify flows were communicated to the physical switch. # Could use size of ofchannel log, though this is not authoritative. 
if SWITCH_MAP: time.sleep(1) return switch = self.net.switches[0] for _ in range(timeout): dump_flows_cmd = '%s dump-flows %s' % (self.OFCTL, switch.name) dump_flows = switch.cmd(dump_flows_cmd) for line in dump_flows.split('\n'): if re.search(flow, line): return time.sleep(1) print flow, dump_flows self.assertTrue(re.search(flow, dump_flows)) def swap_host_macs(self, first_host, second_host): first_host_mac = first_host.MAC() second_host_mac = second_host.MAC() first_host.setMAC(second_host_mac) second_host.setMAC(first_host_mac) def verify_ipv4_routing(self, first_host, first_host_routed_ip, second_host, second_host_routed_ip): first_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (first_host.intf(), first_host_routed_ip.ip))) second_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (second_host.intf(), second_host_routed_ip.ip))) first_host.cmd(('route add -net %s gw %s' % ( second_host_routed_ip.masked(), self.CONTROLLER_IPV4))) second_host.cmd(('route add -net %s gw %s' % ( first_host_routed_ip.masked(), self.CONTROLLER_IPV4))) self.net.ping(hosts=(first_host, second_host)) self.wait_until_matching_flow( 'nw_dst=%s.+set_field:%s->eth_dst' % ( first_host_routed_ip.masked(), first_host.MAC())) self.wait_until_matching_flow( 'nw_dst=%s.+set_field:%s->eth_dst' % ( second_host_routed_ip.masked(), second_host.MAC())) self.one_ipv4_ping(first_host, second_host_routed_ip.ip) self.one_ipv4_ping(second_host, first_host_routed_ip.ip) def verify_ipv6_routing(self, first_host, first_host_ip, first_host_routed_ip, second_host, second_host_ip, second_host_routed_ip): self.add_host_ipv6_address(first_host, first_host_ip) self.add_host_ipv6_address(second_host, second_host_ip) self.one_ipv6_ping(first_host, second_host_ip.ip) self.one_ipv6_ping(second_host, first_host_ip.ip) self.add_host_ipv6_address(first_host, first_host_routed_ip) self.add_host_ipv6_address(second_host, second_host_routed_ip) first_host.cmd('ip -6 route add %s via %s' % ( 
second_host_routed_ip.masked(), self.CONTROLLER_IPV6)) second_host.cmd('ip -6 route add %s via %s' % ( first_host_routed_ip.masked(), self.CONTROLLER_IPV6)) self.wait_until_matching_flow( 'ipv6_dst=%s.+set_field:%s->eth_dst' % ( first_host_routed_ip.masked(), first_host.MAC())) self.wait_until_matching_flow( 'ipv6_dst=%s.+set_field:%s->eth_dst' % ( second_host_routed_ip.masked(), second_host.MAC())) self.one_ipv6_controller_ping(first_host) self.one_ipv6_controller_ping(second_host) self.one_ipv6_ping(first_host, second_host_routed_ip.ip) self.one_ipv6_ping(second_host, first_host_routed_ip.ip)
def perfTest(): "Cria a rede e executa o teste de performace " """Lembrando que os testes disponiveis deve ser trocado o parametro abaixo para cada tipo de toplogia a ser testada""" topo = LeafSpine(n=4) test = 'LeafSpine' run_test = 2 #Parametro 1 testa com Iperf largura de banda e 2 testa o ping net = Mininet(topo=topo, controller=RemoteController, link=TCLink, ipBase='172.16.0.0/16') net.start() seconds = 100 net.waitConnected() print "Espara a arquitetura convergir" net.pingAll() host = {} print "Comeca os testes" if (test == 'Fattree' or test == 'FatTreeNoLink' or test == 'FatTreeTopotest'): max_host = 100 for y in range(0, max_host): host_name = 'h' +str(y) host[y] = net.get(host_name) elif (test == 'LeafSpine' or test =='LeafSpinenolink' or test == 'Facebook4post' or test == 'FacebookNewFabric' ): print "*** Testes arquiteturas baseadas em Leaf Spine ***" max_host = 100 for y in range(0, max_host): host_name = 'h' +str(y) host[y] = net.get(host_name) elif (test =='HybridFattree' or test == 'HybridFattreenolink'): print "***Testes com Hybrid Fattree" max_host = 100 for x in range(0, max_host): host_name = 'h' +str(x) print "Adding %s" % host_name host[x] = net.get(host_name) if (run_test == 1): print " Teste com IPERF largura de banda" if ((max_host%2) == 0): for x in range(0, (max_host/2)): src = host[x] dst = host[(max_host-1)-x] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) for x in range(0, (max_host/2)): dst = host[x] src = host[(max_host-1)-x] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) else: dst = host[0] src = host[5] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[1] src = host[10] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[2] src = host[15] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[3] src = host[20] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[6] src = host[11] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = 
host[7] src = host[16] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[8] src = host[21] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[12] src = host[17] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[13] src = host[22] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[18] src = host[23] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) src = host[0] dst = host[5] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[1] dst = host[10] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[2] dst = host[15] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[3] dst = host[20] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[6] dst = host[11] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[7] dst = host[16] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[8] dst = host[21] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[12] dst = host[17] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[13] dst = host[22] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[18] dst = host[23] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) elif (run_test ==2 and test =='DcellNoLoop'): print "Testes com Ping latencia" outfiles, errfiles = {}, {} packetsize = 1454 #max packet size 1472. 
MTU set to 1500 bottom = 0 for h in range(0, max_host): # Create and/or erase output files outfiles[ host[h] ] = '/tmp/%s.out' % host[h].name errfiles[ host[h] ] = '/tmp/%s.err' % host[h].name host[h].cmd( 'echo >', outfiles[ host[h] ] ) host[h].cmd( 'echo >', errfiles[ host[h] ] ) # Start pings if (h<max_host-5): host[h].cmdPrint('ping', host[h+5].IP(), '-s', packetsize, '>', outfiles[ host[h] ], '2>', errfiles[ host[h] ], '&' ) else: host[h].cmdPrint('ping', host[bottom].IP(), '-s', packetsize, '>', outfiles[ host[h] ], '2>', errfiles[ host[h] ], '&' ) bottom = bottom +1 print "Monitora a saida dos resultados para", seconds, "segundos ou milesegundos" f = open('output%s.txt' % str(packetsize), 'w') for host[h], line in monitorFiles( outfiles, seconds, timeoutms=500 ): if host[h]: f.write(line) sleep(11) elif (run_test ==2 and test !='HybridFatTreeNoLink'): print "teste com Ping latencia especifico" outfiles, errfiles = {}, {} packetsize = 54 for h in range(0, max_host): # Create and/or erase output files outfiles[ host[h] ] = '/tmp/%s.out' % host[h].name errfiles[ host[h] ] = '/tmp/%s.err' % host[h].name host[h].cmd( 'echo >', outfiles[ host[h] ] ) host[h].cmd( 'echo >', errfiles[ host[h] ] ) # Start pings if (h<max_host-1): host[h].cmdPrint('ping', host[h+1].IP(), '-s', packetsize, '>', outfiles[ host[h] ], '2>', errfiles[ host[h] ], '&' ) else: host[h].cmdPrint('ping', host[0].IP(), '-s', packetsize, '>', outfiles[ host[h] ], '2>', errfiles[ host[h] ], '&' ) print "Monitora a saida dos resultados para ", seconds, "segundos ou milisegundos" f = open('output%s.txt' % str(packetsize), 'w') for host[h], line in monitorFiles( outfiles, seconds, timeoutms=500 ): if host[h]: f.write(line) sleep(11) print "Final dos testes" net.stop()
class CPSC558FinalProject:
    """Driver for the CPSC 558 final project experiments.

    Orchestrates a Mininet network with a Ryu-based controller chosen by
    run name ("demo"/"hub"/"switch"/"qswitch"), starts traffic-generating
    hosts (tattle tail, file server/clients, video server/clients), and
    summarizes per-client bandwidth results from their log files.
    """

    __DEFAULT_RUN_NAME = "main"
    __DEFAULT_FILE_SERVER_DIRECTORY = "data"
    __BANDWIDTH_LIMIT_MBPS = 1000
    __BANDWIDTH_LIMIT_LATENCY = "1ms"

    def __init__(self, run_name):
        """Remember the run name and set up per-run and summary loggers."""
        self.__run_name = run_name

        self.__logger = Logger(group=self.__run_name, log_name=__name__, label="558 Project")
        self.__logger_summary = Logger(group="summary", log_name=__name__, label="558 Project Summary", append=True)

        self.__net = None  # Mininet
        self.__topo = None
        self.__controller = None

        self.__logger.get().info("Instantiated inside Python version: " + str(sys.version))

    def run(self):
        """Pick a controller implementation by run name, then run the experiment."""
        log = self.__logger.get()

        controller_log_file = os.path.join(
            self.__logger.make_log_file_directory_path(), "controller.txt")
        controller_name = "ryu_" + self.__run_name
        controller_path_relative = "controllers/"

        # Map the run name to a Ryu controller source file.
        if self.__run_name == "demo":
            controller_source_file_name = "Demo_SimpleSwitch.py"
        elif self.__run_name == "hub":
            controller_source_file_name = "DumbHub.py"
        elif self.__run_name == "switch":
            controller_source_file_name = "SimpleSwitch.py"
        elif self.__run_name == "qswitch":
            controller_source_file_name = "QSwitch.py"
        else:
            raise Exception("Invalid run name: " + str(self.__run_name))
        controller_path_relative += controller_source_file_name

        # Instantiate Mininet::Ryu(), which just launches ryu-manager for us
        controller = Ryu(
            controller_name,
            controller_path_relative,
            # "--verbose",
            "--log-file", controller_log_file)

        self.__logger.heading("Running with controller: " + controller_source_file_name)

        log.info("Instantiating custom Topology class")
        self.__topo = Topology(self.__logger)

        log.info("Rendering graph of current topology")
        self.__topo.render_dotgraph()

        self.run_with_controller(controller)
        # log.info("Done")

    def run_with_controller(self, controller):
        """Start Mininet with *controller*, launch all traffic, and summarize logs."""
        log = self.__logger.get()

        log.info("Instantiating Mininet")
        self.__net = Mininet(topo=self.__topo, controller=controller, waitConnected=False)
        self.__topo.set_net(self.__net)

        log.info("Starting Mininet (will wait for controller)")
        self.__net.start()
        wait_result = self.__net.waitConnected(timeout=10)
        if wait_result is False:
            log.error("Failed to wait for a controller!")
            log.error("FAIL")
            return
        log.info("Mininet found a controller to connect to")

        # Create qos queues
        self.__topo.create_qos_queues_on_switch()

        # Ping tests
        self.ping_all()

        # Begin node traffic
        self.start_tattle_tail(True)
        self.start_file_traffic(True)
        self.start_video_traffic(True)

        # self.wait_for_hosts_to_finish()
        # NOTE(review): the wait above appears commented out in the source,
        # so the summary below may run before clients finish — confirm.
        self.summarize_node_logs()

    def do_topology_test(self):
        """Build the topology (no traffic) and render its graph, for inspection."""
        log = self.__logger.get()
        log.info("Running topology test")

        log.info("Instantiating custom Topology class")
        self.__topo = Topology(self.__logger)

        log.info("Instantiating Mininet")
        self.__net = Mininet(topo=self.__topo, controller=mininet.node.RemoteController)
        self.__topo.set_net(self.__net)
        self.__topo.consume_instances()

        log.info("Rendering graph of current topology")
        self.__topo.render_dotgraph(False)

        log.info("Done!")

    def ping_all(self):
        """Ping between all node pairs (1s timeout per ping)."""
        self.__logger.get().info("Attempting to ping between all nodes")
        self.__net.pingAll(timeout=1)
        print()
        self.__logger.get().info("Done pinging between all nodes")

    @staticmethod
    def make_process_stdout_file_path(run_name, file_name, clear=True):
        """Return log/<run_name>/<file_name>.txt, creating the directory and
        (by default) deleting any existing file first."""
        output_dir = os.path.join(os.path.dirname(__file__), "log", run_name)
        try:
            os.makedirs(output_dir)
        except FileExistsError:
            pass
        file_path = os.path.join(output_dir, file_name + ".txt")
        if clear and os.path.isfile(file_path):
            os.unlink(file_path)
        return file_path

    def start_tattle_tail(self, use_log: bool = True):
        """Launch the tattle-tail program on its host (async via sendCmd)."""
        log = self.__logger.get()
        log.info("Starting tattle tail")

        log_file = self.make_process_stdout_file_path(self.__run_name, "tattle-tail-stdout")
        log.info(log_file)

        # Get tattle tail instance
        tattle = self.__topo.get_tattle_tail_instance()
        tattle_ip = tattle.IP()
        log.info("Tattle tail IP is: " + str(tattle_ip))

        # Start the tattle tail
        if use_log:
            tattle.cmd("ifconfig | grep eth >> \"" + log_file + "\" 2>&1")
            tattle.sendCmd(
                "./main.py --tattle-tail"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(tattle) + "\""
                + " >> \"" + log_file + "\" 2>&1")
        else:
            tattle.cmd("ifconfig | grep eth 2>&1")
            tattle.sendCmd(
                "./main.py --tattle-tail"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(tattle) + "\""
                + " 2>&1")

        log.info("Done starting tattle tail")

    def start_video_traffic(self, use_log: bool = True):
        """Launch the video server, then every video client pointed at it."""
        log = self.__logger.get()
        log.info("Starting video traffic")

        server_log_file = self.make_process_stdout_file_path(
            self.__run_name, "video-server-stdout")
        log.info("Video server stdout: " + server_log_file)
        client_log_file = self.make_process_stdout_file_path(
            self.__run_name, "video-clients-stdout")
        log.info("Video clients stdout: " + client_log_file)

        # Get video server instance
        server = self.__topo.get_video_server_instance()
        server_ip = server.IP()
        log.info("Video server IP is: " + str(server_ip))

        # Start video server
        if use_log:
            server.cmd("ifconfig | grep eth >> \"" + server_log_file + "\" 2>&1")
            server.sendCmd(
                "./main.py --video-server"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(server) + "\""
                + " >> \"" + server_log_file + "\" 2>&1")
        else:
            server.cmd("ifconfig | grep eth 2>&1")
            server.sendCmd(
                "./main.py --video-server"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(server) + "\""
                + " 2>&1")

        # Grab client instances
        clients = list(self.__topo.get_video_client_instances().values())

        # Start each client
        for client in clients:
            if use_log:
                client.cmd("ifconfig | grep eth >> \"" + client_log_file + "\" 2>&1")
                client.sendCmd(
                    "./main.py --video-client"
                    + " --run-name \"" + str(self.__run_name) + "\""
                    + " --name \"" + str(client) + "\""
                    + " --host \"" + server_ip + "\""
                    + " >> \"" + client_log_file + "\" 2>&1")
            else:
                client.cmd("ifconfig | grep eth 2>&1")
                client.sendCmd(
                    "./main.py --video-client"
                    + " --run-name \"" + str(self.__run_name) + "\""
                    + " --name \"" + str(client) + "\""
                    + " --host \"" + server_ip + "\""
                    + " 2>&1")

        log.info("Done starting video traffic")

    def start_file_traffic(self, use_log: bool = True):
        """Launch the file server (serving make_file_server_directory()), then every file client."""
        log = self.__logger.get()
        log.info("Starting file traffic")

        server_log_file = self.make_process_stdout_file_path(
            self.__run_name, "file-server-stdout")
        log.info("File server stdout: " + server_log_file)
        client_log_file = self.make_process_stdout_file_path(
            self.__run_name, "file-clients-stdout")
        log.info("File clients stdout: " + client_log_file)

        # Get file server instance
        server = self.__topo.get_file_server_instance()
        server_ip = server.IP()
        log.info("File server IP is: " + str(server_ip))

        # Start file server
        if use_log:
            server.cmd("ifconfig | grep eth >> \"" + server_log_file + "\" 2>&1")
            server.sendCmd(
                "./main.py --file-server"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(server) + "\""
                + " --directory \"" + self.make_file_server_directory() + "\""
                + " >> \"" + server_log_file + "\" 2>&1")
        else:
            server.cmd("ifconfig | grep eth 2>&1")
            server.sendCmd(
                "./main.py --file-server"
                + " --run-name \"" + str(self.__run_name) + "\""
                + " --name \"" + str(server) + "\""
                + " --directory \"" + self.make_file_server_directory() + "\""
                + " 2>&1")

        # Grab client instances
        clients = list(self.__topo.get_file_client_instances().values())

        # Start each client
        for client in clients:
            if use_log:
                client.cmd("ifconfig | grep eth >> \"" + client_log_file + "\" 2>&1")
                client.sendCmd(
                    "./main.py --file-client"
                    + " --run-name \"" + str(self.__run_name) + "\""
                    + " --name \"" + str(client) + "\""
                    + " --host \"" + server_ip + "\""
                    + " >> \"" + client_log_file + "\" 2>&1")
            else:
                client.cmd("ifconfig | grep eth 2>&1")
                client.sendCmd(
                    "./main.py --file-client"
                    + " --run-name \"" + str(self.__run_name) + "\""
                    + " --name \"" + str(client) + "\""
                    + " --host \"" + server_ip + "\""
                    + " 2>&1")

        log.info("Done starting file traffic")

    def wait_for_hosts_to_finish(self):
        """Block until every video and file client's sendCmd command exits."""
        log = self.__logger.get()
        log.info("Start waiting for all hosts to finish")

        # Gather all hosts to wait for
        hosts_to_wait_for = list()
        for host_name in self.__topo.get_video_client_instances():
            hosts_to_wait_for.append(self.__net.nameToNode[host_name])
        for host_name in self.__topo.get_file_client_instances():
            hosts_to_wait_for.append(self.__net.nameToNode[host_name])

        # Wait for all the hosts
        for host in hosts_to_wait_for:
            log.info("Waiting for host " + str(host) + " to finish its command")
            host.waitOutput(verbose=True)
            log.info("Host " + str(host) + " has finished its command")

        log.info("Done waiting for all hosts to finish")

    def make_file_server_directory(self):
        """Return the absolute path of the directory the file server serves."""
        d = os.path.join(os.path.dirname(__file__), self.__DEFAULT_FILE_SERVER_DIRECTORY)
        return d

    def summarize_node_logs(self):
        """Entry point for post-run log summarization."""
        self.summarize_node_benchmark_logs()

    def summarize_node_benchmark_logs(self):
        """Parse each client's log for bytes received and Mbps, then log
        per-category (file/video/all) average bandwidths.

        Raises Exception if a client log is missing either metric.
        """
        log = self.__logger.get()
        log_s = self.__logger_summary.get()

        log.info("Attempting to summarize node benchmark logs")
        log_s.info("")
        log_s.info("*** " + self.__run_name + " ***")
        log_s.info("Attempting to summarize node benchmark logs")

        logs_dir = self.__logger.make_log_file_directory_path()
        log.info("Pulling from log directory: " + logs_dir)

        # Build a list of all nodes we're interested in
        nodes_file_clients = list(
            self.__topo.get_file_client_instances().values())
        nodes_video_clients = list(
            self.__topo.get_video_client_instances().values())
        nodes_all_clients = nodes_file_clients + nodes_video_clients
        log.info("Will examine logs from " + str(len(nodes_all_clients)) + " client nodes")

        # Clients are expected to print these lines; parsed with MULTILINE search.
        pattern_bytes_received = re.compile(
            """^Bytes received: (?P<bytes>[0-9]+)$""",
            re.MULTILINE | re.IGNORECASE)
        pattern_mbps = re.compile(
            """^Megabits per second: (?P<mbps>[0-9.]+)$""",
            re.MULTILINE | re.IGNORECASE)

        # For each client we're interested in, pull the number of bytes transferred from its logs
        total_bytes = 0
        mbps_all_samples = list()
        mbps_file_samples = list()
        mbps_video_samples = list()
        for node in nodes_all_clients:

            node_log_file_name = str(node) + ".txt"
            log_path = os.path.join(logs_dir, node_log_file_name)
            log.info("Examining log for node \"" + str(node) + "\": " + node_log_file_name)

            # Load the logfile
            with open(log_path, "rt") as f:
                s = f.read()

            # Bytes received
            match = pattern_bytes_received.search(s)
            if match is None:
                raise Exception(
                    "Failed to parse node; Cannot find bytes received in: " + node_log_file_name)
            node_bytes = int(match.group("bytes"))
            total_bytes += node_bytes
            log.info(
                "Node \"" + str(node) + "\" seems to have received " + str(node_bytes) + " bytes"
                + " (" + str(total_bytes) + " total)")

            # Megabits per second
            match = pattern_mbps.search(s)
            if match is None:
                raise Exception(
                    "Failed to parse node; Cannot find megabits per second!")
            node_mbps = float(match.group("mbps"))
            log.info(
                "Node \"" + str(node) + "\" seems to have received data at "
                + str(node_mbps) + " megabits per second")

            # Add to sample pools
            mbps_all_samples.append(node_mbps)
            if node in nodes_file_clients:
                mbps_file_samples.append(node_mbps)
            elif node in nodes_video_clients:
                mbps_video_samples.append(node_mbps)
            else:
                raise Exception(
                    "Don't know where to add this node's bandwidth sample!")

        # NOTE(review): these divisions raise ZeroDivisionError if a category
        # has no clients — confirm topology always provides both kinds.
        mbps_average_file = sum(mbps_file_samples) / len(mbps_file_samples)
        mbps_average_video = sum(mbps_video_samples) / len(mbps_video_samples)
        mbps_average_all = sum(mbps_all_samples) / len(mbps_all_samples)

        log.info("We seem to have the following aggregate bandwidths:")
        log_s.info("We seem to have the following aggregate bandwidths:")
        log.info("File clients: " + str(mbps_average_file) + " mbps")
        log_s.info("File clients: " + str(mbps_average_file) + " mbps")
        log.info("Video clients: " + str(mbps_average_video) + " mbps")
        log_s.info("Video clients: " + str(mbps_average_video) + " mbps")
        log.info("All clients: " + str(mbps_average_all) + " mbps")
        log_s.info("All clients: " + str(mbps_average_all) + " mbps")
def perfTest():
    "Create network and run simple performance test"
    """available tests include FatTreeTopoNoLoop, DcellNoLoop, FacebookNoLoop"""
    # Python 2 script: builds the selected topology, waits for the remote
    # controller, then runs either iperf threads or background pings whose
    # output is monitored into /tmp and collected into output<size>.txt.
    topo = FatTreeTopoNoLoop(n=4)
    test = 'FatTreeTopoNoLoop'
    run_test = 2  #Set to 1 for IPERF test or 2 for Ping Test
    packetsize = 1472  #max packet size 1472 since MTU is 1500
    net = Mininet(topo=topo, controller=RemoteController, link=TCLink, ipBase='192.168.0.0/16')
    net.start()
    seconds = 10
    #dumpNodeConnections(net.hosts) #Dumps the connections from each host
    net.waitConnected()
    print "Waiting for network to converge"
    net.pingAll()
    host = {}
    print "Starting tests"
    # Resolve host objects by name; host count depends on the chosen topology.
    if (test == 'FatTreeTopoNoLoop'):
        max_host = 512
        for y in range(0, max_host):
            host_name = 'h' + str(y)
            host[y] = net.get(host_name)
    elif (test == 'FacebookNoLoop'):
        print "*** Running Facebook tests ***"
        max_host = 48
        for x in range(0, max_host):
            host_name = 'h' + str(x)
            host[x] = net.get(host_name)
    elif (test == 'DcellNoLoop'):
        print "***Running DCellNoLoop tests"
        max_host = 50
        for x in range(0, max_host):
            host_name = 'h' + str(x)
            print "Adding %s" % host_name
            host[x] = net.get(host_name)
    if (run_test == 1):
        # NOTE(review): the iperf branch only sleeps here — the actual iperf
        # threads appear in other variants of this function; confirm intended.
        print "IPERF Testing"
        sleep(5)
    elif (run_test == 2 and test == 'DcellNoLoop'):
        print "Ping Testing"
        outfiles, errfiles = {}, {}
        bottom = 0  # NOTE(review): assigned but never used in this branch
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings: each host pings its successor; the last wraps to h0.
            if (h < max_host-1):
                host[h].cmdPrint('ping', host[h+1].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[0].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
        print "Monitoring output for", seconds, "seconds"
        # NOTE(review): f is never closed, and the for-target writes into
        # host[h] (h is the final loop index) instead of a fresh variable —
        # valid Python 2, but it clobbers that dict entry; confirm intended.
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        #Still working on killing ping. Run as last test.
        #for h in range(0, max_host):
        #host[h].cmd( 'kill %ping')
        sleep(11)
    elif (run_test == 2 and test != 'DcellNoLoop'):
        print "Ping Testing"
        outfiles, errfiles = {}, {}
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings: ring pattern, successor ping with wraparound to h0.
            if (h < max_host-1):
                host[h].cmdPrint('ping', host[h+1].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[0].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
        print "Monitoring output for", seconds, "seconds"
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        #Still working on killing ping. Run as last test.
        #for h in range(0, max_host):
        #host[h].cmd( 'kill %ping')
        sleep(11)
    print "Ending tests"
    net.stop()
from mininet.cli import CLI import os, time net = Mininet(switch = OVSSwitch) try: poxcontroller = net.addController(name="pox", controller=RemoteController, ip="127.0.0.1", protocol="tcp", port=6633) h1 = net.addHost("h1") h2 = net.addHost("h2") s1 = net.addSwitch("s1") net.addLink(h1, s1) #s1_eth1 net.addLink(s1, h2) #s1_eth2 net.start() #wait until all switches have connected to the controller net.waitConnected() #bandwidth testing, 100M == 100megabit. Connection: host1 to host2 listOfHosts = [h1, h2] net.iperf(listOfHosts, l4Type="UDP", seconds=5) #Testing the opposite connection: host2 to host1 listOfHosts.reverse() net.iperf(listOfHosts, l4Type="UDP", udpBw="100M", seconds=5) #Doing another test between h1 and h2 var = raw_input("Press enter for another test") listOfHosts.reverse() net.iperf(listOfHosts, l4Type="UDP", udpBw="100M", seconds=5) net.stop() os.system("sudo mn -c") #build-in mininet command that cleans up after stopping the net
def setupTopology(controller_addr): global net,c1,s1,s2,s3 global h1,h2,h3,h4,h5,h6,h7,h8,h9,h10 "Create and run multiple link network" net = Mininet(controller=RemoteController) print "mininet created" c1 = net.addController('c1', ip=controller_addr,port=6653) # h1: IOT Device. # h2 : StatciDHCPD # h3 : router / NAT # h4 : Non IOT device. h1 = net.addHost('h1') h2 = net.addHost('h2') h3 = net.addHost('h3') h4 = net.addHost('h4') h5 = net.addHost('h5') h6 = net.addHost('h6') h7 = net.addHost('h7') h8 = net.addHost('h8') h9 = net.addHost('h9') h10 = net.addHost('h10') hosts.append(h1) hosts.append(h2) hosts.append(h3) hosts.append(h4) hosts.append(h5) hosts.append(h6) hosts.append(h7) hosts.append(h8) hosts.append(h9) hosts.append(h10) s2 = net.addSwitch('s2',dpid="2") s3 = net.addSwitch('s3',dpid="3") s1 = net.addSwitch('s1',dpid="1") s1.linkTo(h1) s1.linkTo(h2) s1.linkTo(h3) s1.linkTo(h4) s1.linkTo(h5) s1.linkTo(h6) s1.linkTo(h7) s2.linkTo(h8) s3.linkTo(h8) s3.linkTo(h9) s3.linkTo(h10) # S2 is the NPE switch. # Direct link between S1 and S2 s1.linkTo(s2) h8.cmdPrint('echo 0 > /proc/sys/net/ipv4/ip_forward') # Flush old rules. h8.cmdPrint('iptables -F') h8.cmdPrint('iptables -t nat -F') h8.cmdPrint('iptables -t mangle -F') h8.cmdPrint('iptables -X') h8.cmdPrint('echo 1 > /proc/sys/net/ipv4/ip_forward') # Set up h3 to be our router (it has two interfaces). 
# Set up iptables to forward as NAT h8.cmdPrint('iptables -t nat -A POSTROUTING -o h8-eth1 -s 10.0.0.0/24 -j MASQUERADE') net.build() net.build() c1.start() s1.start([c1]) s2.start([c1]) s3.start([c1]) net.start() # Clean up any traces of the previous invocation (for safety) h1.setMAC("00:00:00:00:00:31","h1-eth0") h2.setMAC("00:00:00:00:00:32","h2-eth0") h3.setMAC("00:00:00:00:00:33","h3-eth0") h4.setMAC("00:00:00:00:00:34","h4-eth0") h5.setMAC("00:00:00:00:00:35","h5-eth0") h6.setMAC("00:00:00:00:00:36","h6-eth0") h7.setMAC("00:00:00:00:00:37","h7-eth0") h8.setMAC("00:00:00:00:00:38","h8-eth0") h9.setMAC("00:00:00:00:00:39","h9-eth0") h10.setMAC("00:00:00:00:00:3A","h10-eth0") # Set up a routing rule on h2 to route packets via h3 h1.cmdPrint('ip route del default') h1.cmdPrint('ip route add default via 10.0.0.8 dev h1-eth0') # Set up a routing rule on h2 to route packets via h3 h2.cmdPrint('ip route del default') h2.cmdPrint('ip route add default via 10.0.0.8 dev h2-eth0') # Set up a routing rule on h2 to route packets via h7 h3.cmdPrint('ip route del default') h3.cmdPrint('ip route add default via 10.0.0.8 dev h3-eth0') # Set up a routing rule on h2 to route packets via h3 h4.cmdPrint('ip route del default') h4.cmdPrint('ip route add default via 10.0.0.8 dev h4-eth0') # Set up a routing rule on h5 to route packets via h3 h5.cmdPrint('ip route del default') h5.cmdPrint('ip route add default via 10.0.0.8 dev h5-eth0') # h6 is a localhost. h6.cmdPrint('ip route del default') h6.cmdPrint('ip route add default via 10.0.0.8 dev h6-eth0') # The IDS runs on h8 h7.cmdPrint('ip route del default') h7.cmdPrint('ip route add default via 10.0.0.8 dev h7-eth0') # h9 is our fake host. It runs our "internet" web server. h9.cmdPrint('ifconfig h9-eth0 203.0.113.13 netmask 255.255.255.0') # Start a web server there. h9.cmdPrint('python ../util/http-server.py -H 203.0.113.13&') # h10 is our second fake host. 
It runs another internet web server that we cannot reach h10.cmdPrint('ifconfig h10-eth0 203.0.113.14 netmask 255.255.255.0') # Start a web server there. h10.cmdPrint('python ../util/http-server.py -H 203.0.113.14&') # Start dnsmasq (our dns server). h5.cmdPrint('/usr/sbin/dnsmasq --server 10.0.4.3 --pid-file=/tmp/dnsmasq.pid' ) # Set up our router routes. h8.cmdPrint('ip route add 203.0.113.13/32 dev h8-eth1') h8.cmdPrint('ip route add 203.0.113.14/32 dev h8-eth1') h8.cmdPrint('ifconfig h8-eth1 203.0.113.1 netmask 255.255.255.0') #subprocess.Popen(cmd,shell=True, stdin= subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) h2.cmdPrint("python ../util/udpping.py --port 4000 --server &") h4.cmdPrint("python ../util/udpping.py --port 4000 --server &") # h5 is a localhost peer. h4.cmdPrint("python ../util/udpping.py --port 8000 --server &") # h7 is the controller peer. h7.cmdPrint("python ../util/udpping.py --port 8002 --server &") net.waitConnected() print "*********** System ready *********" return net
def perfTest():
    "Create network and run simple performance test"
    """available tests include FatTreeTopoNoLoop, FatTreeTopo, Dcell, Facebook"""
    # Python 2 script: builds the chosen topology, then either runs paired
    # iperf threads between mirror-image hosts (run_test == 1) or background
    # pings monitored into /tmp and collected into output<size>.txt.
    topo = FatTreeTopoNoLoop(n=4)
    test = 'FatTreeTopoNoLoop'
    run_test = 2  #Set to 1 for IPERF test or 2 for Ping Test
    net = Mininet(topo=topo, controller=RemoteController, link=TCLink, ipBase='192.168.0.0/24')
    net.start()
    seconds = 10
    #dumpNodeConnections(net.hosts) #Dumps the connections from each host
    net.waitConnected()
    print "Waiting for network to converge"
    net.pingAll()
    host = {}
    print "Starting tests"
    # Resolve host objects by name; host count depends on the chosen topology.
    if (test == 'FatTreeTopoNoLoop' or test == 'FatTreeTopo' or test == 'FatTreeTopotest'):
        max_host = 8
        for y in range(0, max_host):
            host_name = 'h' + str(y)
            host[y] = net.get(host_name)
    elif (test == 'Facebook' or test == 'FacebookNoLoop'):
        print "*** Running Facebook tests ***"
        max_host = 48
        for y in range(0, max_host):
            host_name = 'h' + str(y)
            host[y] = net.get(host_name)
    elif (test == 'DcellNoLoop' or test == 'Dcell'):
        print "***Running DCellNoLoop tests"
        max_host = 25
        for x in range(0, max_host):
            host_name = 'h' + str(x)
            print "Adding %s" % host_name
            host[x] = net.get(host_name)
    if (run_test == 1):
        print "IPERF Testing"
        if ((max_host % 2) == 0):
            # Even host count: pair host x with its mirror (max_host-1)-x,
            # run one direction, then swap src/dst and run the other.
            for x in range(0, (max_host/2)):
                src = host[x]
                dst = host[(max_host-1)-x]
                thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
            for x in range(0, (max_host/2)):
                dst = host[x]
                src = host[(max_host-1)-x]
                thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
        else:
            # Odd host count (Dcell, 25 hosts): hand-picked pairs, first
            # direction dst<-src, then the same pairs reversed.
            dst = host[0]
            src = host[5]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[1]
            src = host[10]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[2]
            src = host[15]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[3]
            src = host[20]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[6]
            src = host[11]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[7]
            src = host[16]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[8]
            src = host[21]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[12]
            src = host[17]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[13]
            src = host[22]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            dst = host[18]
            src = host[23]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
            # Same pairs, opposite direction.
            src = host[0]
            dst = host[5]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[1]
            dst = host[10]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[2]
            dst = host[15]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[3]
            dst = host[20]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[6]
            dst = host[11]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[7]
            dst = host[16]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[8]
            dst = host[21]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[12]
            dst = host[17]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[13]
            dst = host[22]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            src = host[18]
            dst = host[23]
            thread.start_new_thread(iperf_thread, (net, src, dst))
            sleep(10)
    elif (run_test == 2 and test == 'DcellNoLoop'):
        print "Ping Testing"
        outfiles, errfiles = {}, {}
        packetsize = 1454  #max packet size 1472. MTU set to 1500
        bottom = 0
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings: each host pings 5 ahead; the last 5 wrap to the
            # bottom hosts (h0, h1, ...) via the `bottom` counter.
            if (h < max_host-5):
                host[h].cmdPrint('ping', host[h+5].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[bottom].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
                bottom = bottom + 1
        print "Monitoring output for", seconds, "seconds"
        # NOTE(review): f is never closed, and the for-target writes into
        # host[h] (h is the final loop index) instead of a fresh variable —
        # valid Python 2, but it clobbers that dict entry; confirm intended.
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        #Still working on killing ping. Run as last test.
        #for h in range(0, max_host):
        #host[h].cmd( 'kill %ping')
        sleep(11)
    elif (run_test == 2 and test != 'DcellNoLoop'):
        print "Ping Testing"
        outfiles, errfiles = {}, {}
        packetsize = 54  #max packet size 1472. MTU set to 1500
        for h in range(0, max_host):
            # Create and/or erase output files
            outfiles[host[h]] = '/tmp/%s.out' % host[h].name
            errfiles[host[h]] = '/tmp/%s.err' % host[h].name
            host[h].cmd('echo >', outfiles[host[h]])
            host[h].cmd('echo >', errfiles[host[h]])
            # Start pings: ring pattern, successor ping with wraparound to h0.
            if (h < max_host-1):
                host[h].cmdPrint('ping', host[h+1].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
            else:
                host[h].cmdPrint('ping', host[0].IP(), '-s', packetsize,
                                 '>', outfiles[host[h]],
                                 '2>', errfiles[host[h]], '&')
        print "Monitoring output for", seconds, "seconds"
        f = open('output%s.txt' % str(packetsize), 'w')
        for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500):
            if host[h]:
                f.write(line)
        #Still working on killing ping. Run as last test.
        #for h in range(0, max_host):
        #host[h].cmd( 'kill %ping')
        sleep(11)
    print "Ending tests"
    net.stop()
class FaucetTest(unittest.TestCase):
    """Base class for Faucet controller integration tests.

    Writes a per-test Faucet YAML config into a temp dir (consumed by the
    controller via FAUCET_* env vars), drives a Mininet network — or a real
    switch when SWITCH_MAP is populated — and provides ping/flow-matching
    helpers for subclasses.
    """

    # Expected ping(1) summary line for exactly one successful ping.
    ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
    CONFIG = ''
    CONTROLLER_IPV4 = '10.0.0.254'
    CONTROLLER_IPV6 = 'fc00::1:254'
    OFCTL = 'ovs-ofctl -OOpenFlow13'

    def setUp(self):
        """Build the Faucet config file in a fresh temp dir and export its
        location (and log paths) through environment variables."""
        self.tmpdir = tempfile.mkdtemp()
        os.environ['FAUCET_CONFIG'] = os.path.join(self.tmpdir, 'faucet.yaml')
        os.environ['FAUCET_LOG'] = os.path.join(self.tmpdir, 'faucet.log')
        os.environ['FAUCET_EXCEPTION_LOG'] = os.path.join(
            self.tmpdir, 'faucet-exception.log')
        self.debug_log_path = os.path.join(self.tmpdir, 'ofchannel.log')
        # Final config = header (dp_id/hardware) + subclass CONFIG with ports
        # substituted + ofchannel log path.
        self.CONFIG = '\n'.join(
            (self.get_config_header(DPID, HARDWARE),
             self.CONFIG % PORT_MAP,
             'ofchannel_log: "%s"' % self.debug_log_path))
        open(os.environ['FAUCET_CONFIG'], 'w').write(self.CONFIG)
        self.net = None
        self.topo = None

    def get_config_header(self, dpid, hardware):
        """Return the YAML preamble naming the datapath and hardware type."""
        return '''
---
dp_id: 0x%s
name: "faucet-1"
hardware: "%s"
''' % (dpid, hardware)

    def attach_physical_switch(self):
        """Patch the test hosts through to a physical switch.

        Adds each SWITCH_MAP interface to the OVS bridge and installs
        bidirectional passthrough flows, then waits (up to 20s) for the
        ofchannel log to appear as evidence the switch reached the
        controller; exits the process on timeout.
        """
        switch = self.net.switches[0]
        hosts_count = len(self.net.hosts)
        for i, test_host_port in enumerate(sorted(SWITCH_MAP)):
            port_i = i + 1
            mapped_port_i = port_i + hosts_count
            phys_port = Intf(SWITCH_MAP[test_host_port], node=switch)
            switch.cmd('ifconfig %s up' % phys_port)
            switch.cmd('ovs-vsctl add-port %s %s' % (
                switch.name, phys_port.name))
            # Cross-connect the virtual port and the physical port, both ways.
            for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)):
                port_x, port_y = port_pair
                switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % (
                    self.OFCTL, switch.name, port_x, port_y))
        for _ in range(20):
            if (os.path.exists(self.debug_log_path) and
                    os.path.getsize(self.debug_log_path) > 0):
                return
            time.sleep(1)
        print 'physical switch could not connect to controller'
        sys.exit(-1)

    def start_net(self):
        """Start Mininet under the FAUCET controller and wait until the
        datapath is programmed (CONTROLLER flow present)."""
        self.net = Mininet(self.topo, controller=FAUCET)
        self.net.start()
        if SWITCH_MAP:
            self.attach_physical_switch()
        else:
            self.net.waitConnected()
            self.wait_until_matching_flow('actions=CONTROLLER')
        dumpNodeConnections(self.net.hosts)

    def tearDown(self):
        """Stop the network (if started) and remove the temp config dir."""
        if self.net is not None:
            self.net.stop()
            # Mininet takes a long time to actually shutdown.
            # TODO: detect and block when Mininet isn't done.
            time.sleep(5)
        shutil.rmtree(self.tmpdir)

    def add_host_ipv6_address(self, host, ip_v6):
        """Assign an additional IPv6 address to the host's default interface."""
        host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf()))

    def one_ipv4_ping(self, host, dst):
        """Assert a single IPv4 ping from host to dst succeeds."""
        ping_result = host.cmd('ping -c1 %s' % dst)
        self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))

    def one_ipv4_controller_ping(self, host):
        """Assert the host can ping the controller's IPv4 address."""
        self.one_ipv4_ping(host, self.CONTROLLER_IPV4)

    def one_ipv6_ping(self, host, dst):
        """Assert a single IPv6 ping from host to dst succeeds (one retry)."""
        # TODO: retry our one ping. We should not have to retry.
        for _ in range(2):
            ping_result = host.cmd('ping6 -c1 %s' % dst)
            if re.search(self.ONE_GOOD_PING, ping_result):
                return
        self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))

    def one_ipv6_controller_ping(self, host):
        """Assert the host can ping the controller's IPv6 address."""
        self.one_ipv6_ping(host, self.CONTROLLER_IPV6)

    def wait_until_matching_flow(self, flow, timeout=5):
        """Poll `ovs-ofctl dump-flows` until a line matches the regex *flow*,
        or fail the test after *timeout* seconds.

        With a physical switch (SWITCH_MAP) we cannot dump flows, so just
        sleep briefly and assume success.
        """
        # TODO: actually verify flows were communicated to the physical switch.
        # Could use size of ofchannel log, though this is not authoritative.
        if SWITCH_MAP:
            time.sleep(1)
            return
        switch = self.net.switches[0]
        for _ in range(timeout):
            dump_flows_cmd = '%s dump-flows %s' % (self.OFCTL, switch.name)
            dump_flows = switch.cmd(dump_flows_cmd)
            for line in dump_flows.split('\n'):
                if re.search(flow, line):
                    return
            time.sleep(1)
        print flow, dump_flows
        self.assertTrue(re.search(flow, dump_flows))

    def swap_host_macs(self, first_host, second_host):
        """Exchange the MAC addresses of two hosts (tests MAC learning)."""
        first_host_mac = first_host.MAC()
        second_host_mac = second_host.MAC()
        first_host.setMAC(second_host_mac)
        second_host.setMAC(first_host_mac)

    def verify_ipv4_routing(self, first_host, first_host_routed_ip,
                            second_host, second_host_routed_ip):
        """Verify Faucet routes IPv4 between two routed subnets.

        Adds an alias address on each host, routes each host's traffic for
        the other subnet via the controller, waits for the rewrite flows,
        then pings in both directions.
        """
        first_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (
            first_host.intf(), first_host_routed_ip.ip)))
        second_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (
            second_host.intf(), second_host_routed_ip.ip)))
        first_host.cmd(('route add -net %s gw %s' % (
            second_host_routed_ip.masked(), self.CONTROLLER_IPV4)))
        second_host.cmd(('route add -net %s gw %s' % (
            first_host_routed_ip.masked(), self.CONTROLLER_IPV4)))
        # Prime ARP/learning before checking for the routing flows.
        self.net.ping(hosts=(first_host, second_host))
        self.wait_until_matching_flow(
            'nw_dst=%s.+set_field:%s->eth_dst' % (
                first_host_routed_ip.masked(), first_host.MAC()))
        self.wait_until_matching_flow(
            'nw_dst=%s.+set_field:%s->eth_dst' % (
                second_host_routed_ip.masked(), second_host.MAC()))
        self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
        self.one_ipv4_ping(second_host, first_host_routed_ip.ip)

    def verify_ipv6_routing(self, first_host, first_host_ip,
                            first_host_routed_ip, second_host,
                            second_host_ip, second_host_routed_ip):
        """Verify Faucet routes IPv6 between two routed subnets.

        Mirrors verify_ipv4_routing: assigns link and routed addresses,
        installs routes via the controller, waits for the rewrite flows,
        then pings controller and hosts in both directions.
        """
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.add_host_ipv6_address(first_host, first_host_routed_ip)
        self.add_host_ipv6_address(second_host, second_host_routed_ip)
        first_host.cmd('ip -6 route add %s via %s' % (
            second_host_routed_ip.masked(), self.CONTROLLER_IPV6))
        second_host.cmd('ip -6 route add %s via %s' % (
            first_host_routed_ip.masked(), self.CONTROLLER_IPV6))
        self.wait_until_matching_flow(
            'ipv6_dst=%s.+set_field:%s->eth_dst' % (
                first_host_routed_ip.masked(), first_host.MAC()))
        self.wait_until_matching_flow(
            'ipv6_dst=%s.+set_field:%s->eth_dst' % (
                second_host_routed_ip.masked(), second_host.MAC()))
        self.one_ipv6_controller_ping(first_host)
        self.one_ipv6_controller_ping(second_host)
        self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
        self.one_ipv6_ping(second_host, first_host_routed_ip.ip)
def perfTest(): "Create network and run simple performance test" """available tests include FatTreeTopoNoLoop, FatTreeTopo, Dcell, Facebook""" topo = FatTreeTopoNoLoop(n=4) test = 'FatTreeTopoNoLoop' run_test = 2 #Set to 1 for IPERF test or 2 for Ping Test net = Mininet(topo=topo, controller=RemoteController, link=TCLink, ipBase='192.168.0.0/24') net.start() seconds = 10 #dumpNodeConnections(net.hosts) #Dumps the connections from each host net.waitConnected() print "Waiting for network to converge" net.pingAll() host = {} print "Starting tests" if (test == 'FatTreeTopoNoLoop' or test == 'FatTreeTopo' or test == 'FatTreeTopotest'): max_host = 8 for y in range(0, max_host): host_name = 'h' + str(y) host[y] = net.get(host_name) elif (test == 'Facebook' or test == 'FacebookNoLoop'): print "*** Running Facebook tests ***" max_host = 48 for y in range(0, max_host): host_name = 'h' + str(y) host[y] = net.get(host_name) elif (test == 'DcellNoLoop' or test == 'Dcell'): print "***Running DCellNoLoop tests" max_host = 25 for x in range(0, max_host): host_name = 'h' + str(x) print "Adding %s" % host_name host[x] = net.get(host_name) if (run_test == 1): print "IPERF Testing" if ((max_host % 2) == 0): for x in range(0, (max_host / 2)): src = host[x] dst = host[(max_host - 1) - x] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) for x in range(0, (max_host / 2)): dst = host[x] src = host[(max_host - 1) - x] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) else: dst = host[0] src = host[5] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[1] src = host[10] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[2] src = host[15] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[3] src = host[20] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[6] src = host[11] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[7] src = host[16] thread.start_new_thread(iperf_thread, (net, src, 
dst)) dst = host[8] src = host[21] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[12] src = host[17] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[13] src = host[22] thread.start_new_thread(iperf_thread, (net, src, dst)) dst = host[18] src = host[23] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) src = host[0] dst = host[5] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[1] dst = host[10] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[2] dst = host[15] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[3] dst = host[20] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[6] dst = host[11] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[7] dst = host[16] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[8] dst = host[21] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[12] dst = host[17] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[13] dst = host[22] thread.start_new_thread(iperf_thread, (net, src, dst)) src = host[18] dst = host[23] thread.start_new_thread(iperf_thread, (net, src, dst)) sleep(10) elif (run_test == 2 and test == 'DcellNoLoop'): print "Ping Testing" outfiles, errfiles = {}, {} packetsize = 1454 #max packet size 1472. 
MTU set to 1500 bottom = 0 for h in range(0, max_host): # Create and/or erase output files outfiles[host[h]] = '/tmp/%s.out' % host[h].name errfiles[host[h]] = '/tmp/%s.err' % host[h].name host[h].cmd('echo >', outfiles[host[h]]) host[h].cmd('echo >', errfiles[host[h]]) # Start pings if (h < max_host - 5): host[h].cmdPrint('ping', host[h + 5].IP(), '-s', packetsize, '>', outfiles[host[h]], '2>', errfiles[host[h]], '&') else: host[h].cmdPrint('ping', host[bottom].IP(), '-s', packetsize, '>', outfiles[host[h]], '2>', errfiles[host[h]], '&') bottom = bottom + 1 print "Monitoring output for", seconds, "seconds" f = open('output%s.txt' % str(packetsize), 'w') for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500): if host[h]: f.write(line) #Still working on killing ping. Run as last test. #for h in range(0, max_host): #host[h].cmd( 'kill %ping') sleep(11) elif (run_test == 2 and test != 'DcellNoLoop'): print "Ping Testing" outfiles, errfiles = {}, {} packetsize = 54 #max packet size 1472. MTU set to 1500 for h in range(0, max_host): # Create and/or erase output files outfiles[host[h]] = '/tmp/%s.out' % host[h].name errfiles[host[h]] = '/tmp/%s.err' % host[h].name host[h].cmd('echo >', outfiles[host[h]]) host[h].cmd('echo >', errfiles[host[h]]) # Start pings if (h < max_host - 1): host[h].cmdPrint('ping', host[h + 1].IP(), '-s', packetsize, '>', outfiles[host[h]], '2>', errfiles[host[h]], '&') else: host[h].cmdPrint('ping', host[0].IP(), '-s', packetsize, '>', outfiles[host[h]], '2>', errfiles[host[h]], '&') print "Monitoring output for", seconds, "seconds" f = open('output%s.txt' % str(packetsize), 'w') for host[h], line in monitorFiles(outfiles, seconds, timeoutms=500): if host[h]: f.write(line) #Still working on killing ping. Run as last test. #for h in range(0, max_host): #host[h].cmd( 'kill %ping') sleep(11) print "Ending tests" net.stop()