def __init__(self, net, parent=None, width=4): Frame.__init__(self, parent) self.top = self.winfo_toplevel() self.top.title('Mininet') self.net = net self.menubar = self.createMenuBar() cframe = self.cframe = Frame(self) self.consoles = {} # consoles themselves titles = { 'hosts': 'Host', 'switches': 'Switch', 'controllers': 'Controller' } for name in titles: nodes = getattr(net, name) frame, consoles = self.createConsoles(cframe, nodes, width, titles[name]) self.consoles[name] = Object(frame=frame, consoles=consoles) self.selected = None self.select('hosts') self.cframe.pack(expand=True, fill='both') cleanUpScreens() # Close window gracefully Wm.wm_protocol(self.top, name='WM_DELETE_WINDOW', func=self.quit) # Initialize graph graph = Graph(cframe) self.consoles['graph'] = Object(frame=graph, consoles=[graph]) self.graph = graph self.graphVisible = False self.updates = 0 self.hostCount = len(self.consoles['hosts'].consoles) self.bw = 0 self.pack(expand=True, fill='both')
def __init__( self, net, controllerRESTApi, parent=None, width=4 ): Frame.__init__( self, parent ) self.top = self.winfo_toplevel() self.top.title( 'Mininet' ) self.net = net self.controllerRESTApi = controllerRESTApi self.menubar = self.createMenuBar() cframe = self.cframe = Frame( self ) self.consoles = {} # consoles themselves titles = { 'hosts': 'Host', 'switches': 'Switch', 'controllers': 'Controller' } for name in titles: nodes = getattr( net, name ) frame, consoles = self.createConsoles( cframe, nodes, width, titles[ name ] ) self.consoles[ name ] = Object( frame=frame, consoles=consoles ) self.selected = None self.select( 'hosts' ) self.cframe.pack( expand=True, fill='both' ) cleanUpScreens() # Close window gracefully Wm.wm_protocol( self.top, name='WM_DELETE_WINDOW', func=self.quit ) self.pack( expand=True, fill='both' )
def run(self): '''Run the lab 3 simulation environment''' localJitter = 10 # ms, the evolution of the time between two consecutive packets # We create the topology topology = Lab3Topology(nbOfServersPerRegion, nbOfClientsPerRegion, nbOfRegions) # We create the simulation # Set the topology, the class for links and interfaces, the mininet environment must be cleaned up before launching, we should build now the topology simulation = Mininet(topo = topology, link = TCLink, intf = TCIntf, cleanup = True, build = True, ipBase='10.1.0.0/24') # We connect the network to Internet simulation.addNAT().configDefault() # We can start the simulation print "Starting the simulation..." simulation.start() # For each host for host in simulation.hosts: # We set the jitter (It can only be done after the simulation was started, not from the Topology) host.defaultIntf().config(jitter = ("%dms" % localJitter)) # for each server for server in simulation.hosts: if "vessel" in server.name: # We open a xterm and start the server self.startServer(server) makeTerm(node=simulation.getNodeByName("client1"), cmd="firefox") # We also start the Command Line Interface of Mininet CLI(simulation) # Once the CLI is closed (with exit), we can stop the simulation print "Stopping the simulation NOW!" # We close the xterms (mininet.term.cleanUpScreens) cleanUpScreens() simulation.stop()
def __init__( self, net, parent=None, width=4 ): Frame.__init__( self, parent ) self.top = self.winfo_toplevel() self.top.title( 'Mininet' ) self.net = net self.menubar = self.createMenuBar() cframe = self.cframe = Frame( self ) self.consoles = {} # consoles themselves titles = { 'hosts': 'Host', 'switches': 'Switch', 'controllers': 'Controller' } for name in titles: nodes = getattr( net, name ) frame, consoles = self.createConsoles( cframe, nodes, width, titles[ name ] ) self.consoles[ name ] = Object( frame=frame, consoles=consoles ) self.selected = None self.select( 'hosts' ) self.cframe.pack( expand=True, fill='both' ) cleanUpScreens() # Close window gracefully Wm.wm_protocol( self.top, name='WM_DELETE_WINDOW', func=self.quit ) # Initialize graph graph = Graph( cframe ) self.consoles[ 'graph' ] = Object( frame=graph, consoles=[ graph ] ) self.graph = graph self.graphVisible = False self.updates = 0 self.hostCount = len( self.consoles[ 'hosts' ].consoles ) self.bw = 0 self.pack( expand=True, fill='both' )
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd udpbwtest' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) info( "*** Removing old screen sessions\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).split( '\n' ) for dp in dps: if dp != '': sh( 'dpctl deldp ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( "ip link show | egrep -o '(\w+-eth\w+)'" ).split( '\n' ) for link in links: if link != '': sh( "ip link del " + link ) info( "*** Cleanup complete.\n" )
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd udpbwtest' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. sh('killall -9 ' + zombies + ' 2> /dev/null') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old screen sessions\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").split('\n') for dp in dps: if dp != '': sh('dpctl deldp ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh("ip link show | egrep -o '(\w+-eth\w+)'").split('\n') for link in links: if link != '': sh("ip link del " + link) info("*** Cleanup complete.\n")
def startTerms(self): "Start a terminal for each node." info("*** Running terms on %s\n" % os.environ['DISPLAY']) cleanUpScreens() self.terms += makeTerms(self.controllers, 'controller') self.terms += makeTerms(self.switches, 'switch') self.terms += makeTerms(self.hosts, 'host')
def __init__(self, net, parent=None, width=4): Frame.__init__(self, parent) self.top = self.winfo_toplevel() self.top.title("Mininet") self.net = net self.menubar = self.createMenuBar() cframe = self.cframe = Frame(self) self.consoles = {} # consoles themselves titles = {"hosts": "Host", "switches": "Switch", "controllers": "Controller"} for name in titles: nodes = getattr(net, name) frame, consoles = self.createConsoles(cframe, nodes, width, titles[name]) self.consoles[name] = Object(frame=frame, consoles=consoles) self.selected = None self.select("hosts") self.cframe.pack(expand=True, fill="both") cleanUpScreens() # Close window gracefully Wm.wm_protocol(self.top, name="WM_DELETE_WINDOW", func=self.quit) # Initialize graph graph = Graph(cframe) self.consoles["graph"] = Object(frame=graph, consoles=[graph]) self.graph = graph self.graphVisible = False self.updates = 0 self.hostCount = len(self.consoles["hosts"].consoles) self.bw = 0 self.pack(expand=True, fill="both")
def startTerms( self ): "Start a terminal for each node." info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] ) cleanUpScreens() self.terms += makeTerms( self.controllers, 'controller' ) self.terms += makeTerms( self.switches, 'switch' ) self.terms += makeTerms( self.hosts, 'host' )
def run(self): '''Run the simulation environment''' # We create the topology topology = DistributedTopology(self.nb_of_servers) # We create the simulation # Set the topology, the class for links and interfaces, the mininet environment must be cleaned up before launching, we should build now the topology simulation = Mininet(topo=topology, link=TCLink, intf=TCIntf, cleanup=True, build=True, ipBase='10.1.0.0/24') # We connect the network to Internet simulation.addNAT().configDefault() terms = [] # We can start the simulation print "Starting the simulation..." simulation.start() for srv in simulation.hosts: if "server" in srv.name: # We open a xterm and start the server terms.append(self.startServer(srv)[0]) # We also start the Command Line Interface of Mininet CLI(simulation) # Once the CLI is closed (with exit), we can stop the simulation print "Stopping the simulation NOW!" # We close the xterms (mininet.term.cleanUpScreens) cleanUpScreens() for term in terms: os.kill(term.pid, signal.SIGKILL) simulation.stop()
def startTerms(self): "Start a terminal for each node." info("*** Running terms on %s\n" % os.environ["DISPLAY"]) cleanUpScreens() self.terms += makeTerms(self.controllers, "controller") self.terms += makeTerms(self.switches, "switch") self.terms += makeTerms(self.hosts, "host")
def createCfram(self,width=4): cframe = self.cframe = Frame( self ) self.consoles = {} titles = { 'hosts': 'Host', 'switches': 'Switch', 'controllers': 'Controller' } for name in titles: nodes = getattr( self.netManager.getNet(), name) frame, consoles = self.createConsoles(cframe, nodes, width, titles[ name ] ) if name == 'hosts': self.hosts = consoles self.consoles[ name ] = Object( frame=frame, consoles=consoles ) self.selected = None self.select( 'hosts' ) self.cframe.pack( expand=True, fill='both' ) cleanUpScreens() # Close window gracefully Wm.wm_protocol( self.top, name='WM_DELETE_WINDOW', func=self.quit ) # Initialize graph graph = Graph( cframe ) self.consoles[ 'graph' ] = Object( frame=graph, consoles=[ graph ] ) self.graph = graph self.graphVisible = False self.updates = 0 self.hostCount = len( self.consoles[ 'hosts' ].consoles ) self.bw = 0
def start_xterm(self, host_name): "Start a terminal for each node." if 'DISPLAY' not in os.environ: error("Error starting terms: Cannot connect to display\n") return info("*** Running terms on %s\n" % os.environ['DISPLAY']) cleanUpScreens() self.terms += makeTerms([self.net.get(host_name)], 'host')
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh('killall ' + zombies + ' 2> /dev/null') time.sleep(1) sh('killall -9 ' + zombies + ' 2> /dev/null') # And kill off sudo mnexec sh('pkill -9 -f "sudo mnexec"') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old X11 tunnels\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").splitlines() for dp in dps: if dp: sh('dpctl deldp ' + dp) info("*** Removing OVS datapaths") dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh("ovs-vsctl " + " -- ".join("--if-exists del-br " + dp for dp in dps if dp)) # And in case the above didn't work... dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() for dp in dps: sh('ovs-vsctl del-br ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh("ip link show | " "egrep -o '[[:digit:]]+-[[:digit:]]+'").splitlines() #"egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" ).splitlines() for link in links: if link: sh("ip link del " + link) info("*** Killing stale mininet node processes\n") killprocs('mininet:') info("*** Shutting down stale tunnels\n") killprocs('Tunnel=Ethernet') killprocs('.ssh/mn') sh('rm -f ~/.ssh/mn/*') info("*** Cleanup complete.\n")
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh( 'killall ' + zombies + ' 2> /dev/null' ) time.sleep(1) sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) # And kill off sudo mnexec sh( 'pkill -9 -f "sudo mnexec"') info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) info( "*** Removing old X11 tunnels\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).splitlines() for dp in dps: if dp: sh( 'dpctl deldp ' + dp ) info( "*** Removing OVS datapaths" ) dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp for dp in dps if dp ) ) # And in case the above didn't work... dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() for dp in dps: sh( 'ovs-vsctl del-br ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" ).splitlines() for link in links: if link: sh( "ip link del " + link ) info( "*** Killing stale mininet node processes\n" ) killprocs( 'mininet:' ) info ( "*** Shutting down stale tunnels\n" ) killprocs( 'Tunnel=Ethernet' ) killprocs( '.ssh/mn') sh( 'rm -f ~/.ssh/mn/*' ) info( "*** Cleanup complete.\n" )
def __init__(self, net, parent=None, width=4): Frame.__init__(self, parent) self.top = self.winfo_toplevel() self.gheight = 800 self.top.title('Mininet') self.net = net self.menubar = self.createMenuBar() self.consoles = {} self.sconsoles = {} # consoles themselves titles = { 'hosts': 'Host', 'switches': 'Switch', 'controllers': 'Controller' } # (servers + clients) * 20 canvas = self.canvas = Canvas( self, width=1600, height=800, scrollregion=(0, 0, 0, ((servers + nathost + clients + 200) // 4) * 280)) # cframe = self.cframe = Frame(canvas, width=1600, height=((servers + nathost + clients + 3) // 4) * 264) cframe = self.cframe = Frame(canvas) for name in titles: nodes = getattr(net, name) frame, consoles = self.createConsoles(cframe, nodes, width, titles[name]) self.consoles[name] = Object(frame=frame, consoles=consoles) canvas.create_window( (800, ((servers + nathost + clients + 100) // 4) * 132), window=cframe) self.selected = None self.select('hosts') ybar = Scrollbar(self, orient='vertical', command=canvas.yview) canvas.config(yscrollcommand=ybar.set) ybar.pack(side='right', fill='y') canvas.pack(expand=True, fill='both') cleanUpScreens() # Close window gracefully Wm.wm_protocol(self.top, name='WM_DELETE_WINDOW', func=self.quit) # Initialize graph graph = Graph(cframe) self.consoles['graph'] = Object(frame=graph, consoles=[graph]) self.graph = graph self.graphVisible = False self.updates = 0 self.hostCount = len(self.consoles['hosts'].consoles) self.bw = 0 self.pack(expand=True, fill='both')
def hostTerm(net): "Start a terminal for each node." if 'DISPLAY' not in os.environ: error("Error starting terms: Cannot connect to display\n") return info("*** Running terms on %s\n" % os.environ['DISPLAY']) cleanUpScreens() terms = [] net.terms += makeTerms(net.hosts, 'host')
def startTerms( self ): "Start a terminal for each node." if 'DISPLAY' not in os.environ: error( "Error starting terms: Cannot connect to display\n" ) return info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] ) cleanUpScreens() self.terms += makeTerms( self.controllers, 'controller' ) self.terms += makeTerms( self.switches, 'switch' ) self.terms += makeTerms( self.hosts, 'host' )
def startTerms(self): "Start a terminal for each node." if "DISPLAY" not in os.environ: error("Error starting terms: Cannot connect to display\n") return info("*** Running terms on %s\n" % os.environ["DISPLAY"]) cleanUpScreens() self.terms += makeTerms(self.controllers, "controller") self.terms += makeTerms(self.switches, "switch") self.terms += makeTerms(self.hosts, "host")
def startTerms(self): "Start a terminal for each node." if 'DISPLAY' not in os.environ: error("Error starting terms: Cannot connect to display\n") return info("*** Running terms on %s\n" % os.environ['DISPLAY']) cleanUpScreens() self.terms += makeTerms(self.controllers, 'controller') self.terms += makeTerms(self.switches, 'switch') self.terms += makeTerms(self.hosts, 'host')
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh('killall ' + zombies + ' 2> /dev/null') time.sleep(1) sh('killall -9 ' + zombies + ' 2> /dev/null') # And kill off sudo mnexec sh('pkill -9 -f "sudo mnexec"') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old X11 tunnels\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").split('\n') for dp in dps: if dp != '': sh('dpctl deldp ' + dp) info("*** Removing OVS datapaths") dps = sh("ovs-vsctl --timeout=1 list-br").split('\n') for dp in dps: if dp: sh('ovs-vsctl del-br ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh(r"ip link show | egrep -o '(\w+-eth\w+)'").split('\n') for link in links: if link != '': sh("ip link del " + link) info("*** Removing all links of the pattern mn-brX\n") bridges = sh(r"sudo brctl show | egrep -o '(mn+-br\w+)'").split('\n') for bridge in bridges: if bridge != '': sh("ifconfig " + bridge + " down") sh("brctl delbr " + bridge) info("*** Cleanup complete.\n")
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh( 'killall ' + zombies + ' 2> /dev/null' ) time.sleep(1) sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) # And kill off sudo mnexec sh( 'pkill -9 -f "sudo mnexec"') info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) info( "*** Removing old X11 tunnels\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).split( '\n' ) for dp in dps: if dp != '': sh( 'dpctl deldp ' + dp ) info( "*** Removing OVS datapaths" ) dps = sh("ovs-vsctl --timeout=1 list-br").split( '\n' ) for dp in dps: if dp: sh( 'ovs-vsctl del-br ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( r"ip link show | egrep -o '(\w+-eth\w+)'" ).split( '\n' ) for link in links: if link != '': sh( "ip link del " + link ) info( "*** Removing all links of the pattern mn-brX\n" ) bridges = sh( r"sudo brctl show | egrep -o '(mn+-br\w+)'" ).split( '\n' ) for bridge in bridges: if bridge != '': sh( "ifconfig " + bridge + " down") sh( "brctl delbr " + bridge ) info( "*** Cleanup complete.\n" )
def startTerms(self): # pragma: no cover "Start a terminal for each node." if "DISPLAY" not in os.environ: error("Error starting terms: Cannot connect to display\n") return info("*** Running terms on %s\n" % os.environ["DISPLAY"]) cleanUpScreens() self.terms += makeTerms(self.controllers, "controller") self.terms += makeTerms(self.switches, "switch") dhosts = [h for h in self.hosts if isinstance(h, DockerHost)] for d in dhosts: self.terms.append(spawnXtermDocker(d.name)) rest = [h for h in self.hosts if h not in dhosts] self.terms += makeTerms(rest, "host")
def __init__( self, nets=None, parent=None, width=3 ): Frame.__init__( self, parent ) #data structure self.netManager = NetManager() self.dataManager = DataManager() self.scheduler = Scheduler() self.hosts = [] # UI self.top = self.winfo_toplevel() self.top.title( 'Mininet节点调度子系统' ) self.createMenuBar() self.menubar = self.createFramBar() self.createCfram() cleanUpScreens() self.pack( expand=True, fill='both' )
def run(): # Creates the virtual environment, by starting the network and configuring debug information info('** Creating an instance of Lab6 network topology\n') topo = Lab6Topo() info('** Starting the network\n') global net global hosts # We specify the OVSSwitch for better IPv6 performance # We use mininext constructor with the instance of the network, the default controller and the customized openvswitch net = MiniNExT(topo, controller=Controller, switch=OVSSwitch) net.start() info('** Executing custom commands\n') # Space to add any customize command before prompting command line # We assign IPv6 addresses to hosts h1, h2, and h5 as they are not configured through Quagga # We gather only the hosts created in the topology (no switches nor controller) hosts = [ net.getNodeByName( h ) for h in topo.hosts() ] for host in hosts: # Only to hosts: We assign IPv6 address if host.name == 'h1': host.cmd('ip -6 addr add 2001:1:0:1010::10/64 dev h1-eth1') host.cmd('ip -6 route add default via 2001:1:0:1010::1') elif host.name == 'h2': host.cmd('ip -6 addr add 2001:1:0:2020::20/64 dev h2-eth1') host.cmd('ip -6 route add default via 2001:1:0:2020::2') elif host.name == 'h5': host.cmd('ip -6 addr add 2001:1:0:5050::50/64 dev h5-eth1') host.cmd('ip -6 route add default via 2001:1:0:5050::5') # Enable Xterm window for every host info('** Enabling xterm for hosts only\n') # We check if the display is available if 'DISPLAY' not in os.environ: error( "Error starting terms: Cannot connect to display\n" ) return # Remove previous (and possible non-used) socat X11 tunnels cleanUpScreens() # Mininet's function to create Xterms in hosts makeTerms( hosts, 'host' ) # Enable the mininext> prompt info('** Running CLI\n') CLI(net)
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/switches/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd udpbwtest ovs-openflowd ovs-controller' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. sh('killall -9 ' + zombies + ' 2> /dev/null') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log /tmp/mn-*sock') info("*** Removing old screen sessions\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").split('\n') for dp in dps: if dp: sh('dpctl deldp ' + dp) # OVS is tricky. We try the default DB connection. # Finally we also delete the kernel datapath ovsdps = sh( "ovs-vsctl --no-wait -t1 list-br 2>/dev/null | egrep '^mn-dp[0-9]+$'" ).split('\n') for dp in ovsdps: if dp: sh('ovs-vsctl --no-wait -t1 del-br ' + dp + ' 2>/dev/null') ovsdps = sh("ovs-dpctl show | egrep '\w+@\mn-dp[0-9]+:'").split('\n') for dp in ovsdps: if dp: sh('ovs-dpctl del-dp ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh("ip link show | egrep -o '(\w+-eth\w+)'").split('\n') for link in links: if link != '': sh("ip link del " + link) info("*** Cleanup complete.\n")
def cleanup(): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/switches/pings/noxes" "\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd udpbwtest ovs-openflowd ovs-controller' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log /tmp/mn-*sock' ) info( "*** Removing old screen sessions\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).split( '\n' ) for dp in dps: if dp: sh( 'dpctl deldp ' + dp ) # OVS is tricky. We try the default DB connection. # Finally we also delete the kernel datapath ovsdps = sh( "ovs-vsctl --no-wait -t1 list-br 2>/dev/null | egrep '^mn-dp[0-9]+$'" ).split( '\n' ) for dp in ovsdps: if dp: sh( 'ovs-vsctl --no-wait -t1 del-br ' + dp + ' 2>/dev/null') ovsdps = sh( "ovs-dpctl show | egrep '\w+@\mn-dp[0-9]+:'" ).split( '\n' ) for dp in ovsdps: if dp: sh( 'ovs-dpctl del-dp ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( "ip link show | egrep -o '(\w+-eth\w+)'" ).split( '\n' ) for link in links: if link != '': sh( "ip link del " + link ) info( "*** Cleanup complete.\n" )
def run(self, amount=''): "Run the lab 1 simulation environment" # local variables if amount: nbOfServersPerRegion = int(amount) else: nbOfServersPerRegion = 4 nbOfClientsPerRegion = 2 nbOfRegions = 1 localJitter = 10 # ms, the evolution of the time between two consecutive packets # We create the topology topology = Lab1Topology(nbOfServersPerRegion, nbOfClientsPerRegion, nbOfRegions) # We create the simulation # Set the topology, the class for links and interfaces, the mininet environment must be cleaned up before launching, we should build now the topology simulation = Mininet(topo=topology, link=TCLink, intf=TCIntf, cleanup=True, build=True, ipBase='10.1.0.0/24') # We connect the network to Internet simulation.addNAT().configDefault() # We can start the simulation print "Starting the simulation..." simulation.start() # For each host for host in simulation.hosts: # We set the jitter (It can only be done after the simulation was started, not from the Topology) host.defaultIntf().config(jitter=("%dms" % localJitter)) # we create a dictionnary to store the popen popens = {} # for each server for server in simulation.hosts: if "vessel" in server.name: # We open a xterm and start the server self.startServer(server, nbOfServersPerRegion * nbOfRegions) # We also start the Command Line Interface of Mininet CLI(simulation) # Once the CLI is closed (with exit), we can stop the simulation print "Stopping the simulation NOW!" simulation.stop() # We close the xterms (mininet.term.cleanUpScreens) cleanUpScreens()
def myNetwork(with_windows=False): net = Mininet(topo=None, build=False, ipBase='10.0.0.0/8') info('*** Adding controller\n') c0 = net.addController(name='c0', controller=Controller, protocol='tcp', port=6633) info('*** Add switches\n') s1 = net.addSwitch('s1', cls=OVSKernelSwitch) s2 = net.addSwitch('s2', cls=OVSKernelSwitch) s3 = net.addSwitch('s3', cls=OVSKernelSwitch) info('*** Add hosts\n') h1 = net.addHost('h1', cls=Host, ip='10.0.0.1', defaultRoute=None) h4 = net.addHost('h4', cls=Host, ip='10.0.0.4', defaultRoute=None) h3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None) h2 = net.addHost('h2', cls=Host, ip='10.0.0.2', defaultRoute=None) info('*** Add links\n') net.addLink(h1, s1) net.addLink(s1, s2) net.addLink(s2, h2) net.addLink(s2, s3) net.addLink(s3, h3) net.addLink(s3, h4) info('*** Starting network\n') net.build() info('*** Starting controllers\n') for controller in net.controllers: controller.start() info('*** Starting switches\n') net.get('s1').start([c0]) net.get('s2').start([c0]) net.get('s3').start([c0]) info('*** Post configure switches and hosts\n') net.pingAll() # s1 s1.dpctl('add-flow', 'priority=5,in_port=1,actions=output:2', '') s1.dpctl('add-flow', 'priority=5,in_port=2,actions=output:1', '') info('flows s1: ' + s1.dpctl('dump-flows', '', '')) # s2, s3 flow_1_2 = 'priority=255,ip,in_port=1,nw_src=10.0.0.1,nw_dst=10.0.0.4,actions=output:2' flow_2_3 = 'priority=255,ip,in_port=2,nw_src=10.0.0.1,nw_dst=10.0.0.4,actions=output:3' flow_3_2 = 'priority=255,ip,in_port=3,nw_src=10.0.0.4,nw_dst=10.0.0.1,actions=output:2' flow_2_1 = 'priority=255,ip,in_port=2,nw_src=10.0.0.4,nw_dst=10.0.0.1,actions=output:1' s2.dpctl('add-flow', flow_1_2, '') s2.dpctl('add-flow', flow_2_3, '') s2.dpctl('add-flow', flow_3_2, '') s2.dpctl('add-flow', flow_2_1, '') s3.dpctl('add-flow', flow_1_2, '') s3.dpctl('add-flow', flow_2_3, '') s3.dpctl('add-flow', flow_3_2, '') s3.dpctl('add-flow', flow_2_1, '') s2.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.1,actions=output:1', '') s2.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.2,actions=output:2', '') s2.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.3,actions=output:3', '') s2.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.4,actions=output:3', '') s3.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.1,actions=output:1', '') s3.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.2,actions=output:1', '') s3.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.3,actions=output:2', '') s3.dpctl('add-flow', 'priority=65535,arp,arp_tpa=10.0.0.4,actions=output:3', '') """<<< Just in case asymmetric flows were needed # s2, s3 flow_1_2 = 'priority=255,in_port=1,actions=output:2' flow_2_3 = 'priority=255,in_port=2,actions=output:3' flow_3_1 = 'priority=255,in_port=3,actions=output:1' s2.dpctl('add-flow', flow_1_2, '') s2.dpctl('add-flow', flow_2_3, '') s2.dpctl('add-flow', flow_3_1, '') s3.dpctl('add-flow', flow_1_2, '') s3.dpctl('add-flow', flow_2_3, '') s3.dpctl('add-flow', flow_3_1, '') <<<<""" info('flows s2: ' + s1.dpctl('dump-flows', '', '')) info('flows s3: ' + s1.dpctl('dump-flows', '', '')) # net.pingAll() if (with_windows): cleanUpScreens() execInXterm([h2, h3], './sf_hhe.sh; read') execInXterm([h4], './server.sh') execInXterm([h4], './tcp_dump.sh') execInXterm([h1], './tcp_dump.sh') execInXterm([h2], './tcp_dump.sh') execInXterm([h3], './tcp_dump.sh') execInXterm([h1], 'xterm') ## wireshark cmd = 'wireshark' opts = '-i h1-eth0 -k' h1.cmd(cmd + ' ' + opts + '&') opts = '-i h2-eth0 -k' h2.cmd(cmd + ' ' + 
opts + '&') opts = '-i h3-eth0 -k' h3.cmd(cmd + ' ' + opts + '&') opts = '-i h4-eth0 -k' h4.cmd(cmd + ' ' + opts + '&') time.sleep(1) else: h4.sendCmd('./server.sh') time.sleep(1) h3.sendCmd('./sf_hhe.sh') time.sleep(1) h2.sendCmd('./sf_hhe.sh') time.sleep(1) #h1.sendCmd('./tcp_dump.sh > h1_tcp_dump.txt') h1.cmd('./client.sh') time.sleep(1) h1.sendCmd('python3 ../sf_hhe/HttpClient.py -ip 10.0.0.4') output(h1.waitOutput()) if (with_windows): execInXterm([h1], './client.sh ; read') CLI(net) cleanUpScreens() else: h1.cleanup() h2.cleanup() h3.cleanup() h4.cleanup() net.stop()
def cleanup( cls): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info( "*** Removing excess controllers/ofprotocols/ofdatapaths/" "pings/noxes\n" ) zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh( 'killall ' + zombies + ' 2> /dev/null' ) time.sleep( 1 ) sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) # And kill off sudo mnexec sh( 'pkill -9 -f "sudo mnexec"') info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) info( "*** Removing old X11 tunnels\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).splitlines() for dp in dps: if dp: sh( 'dpctl deldp ' + dp ) info( "*** Removing OVS datapaths\n" ) dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp for dp in dps if dp ) ) # And in case the above didn't work... dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines() for dp in dps: sh( 'ovs-vsctl del-br ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" ).splitlines() # Delete blocks of links n = 1000 # chunk size for i in xrange( 0, len( links ), n ): cmd = ';'.join( 'ip link del %s' % link for link in links[ i : i + n ] ) sh( '( %s ) 2> /dev/null' % cmd ) if 'tap9' in sh( 'ip link show' ): info( "*** Removing tap9 - assuming it's from cluster edition\n" ) sh( 'ip link del tap9' ) info( "*** Killing stale mininet node processes\n" ) killprocs( 'mininet:' ) info( "*** Shutting down stale tunnels\n" ) killprocs( 'Tunnel=Ethernet' ) killprocs( '.ssh/mn') sh( 'rm -f ~/.ssh/mn/*' ) # Call any additional cleanup code if necessary for callback in cls.callbacks: callback() # Containernet should also cleanup pending Docker cmd = "docker rm -f $( docker ps --filter 'label=com.containernet' -a -q)" call(cmd, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb')) # cleanup any remaining iptables rules from external SAPs with NAT # we use iptc module to iterate through the loops, but due to a bug, we cannot use iptc to delete the rules # we rely on iptables CLI to delete the found rules info("*** Removing SAP NAT rules\n") table = iptc.Table(iptc.Table.NAT) chain = iptc.Chain(table, 'POSTROUTING') for rule in chain.rules: if SAP_PREFIX in str(rule.out_interface): src_CIDR = str(ipaddress.IPv4Network(unicode(rule.src))) rule0_ = "iptables -t nat -D POSTROUTING ! 
-o {0} -s {1} -j MASQUERADE".\ format(rule.out_interface.strip('!'), src_CIDR) p = Popen(shlex.split(rule0_)) p.communicate() info("delete NAT rule from SAP: {1} - {0} - {2}\n".format(rule.out_interface, rule.in_interface, src_CIDR)) table = iptc.Table(iptc.Table.FILTER) chain = iptc.Chain(table, 'FORWARD') for rule in chain.rules: src_CIDR = str(ipaddress.IPv4Network(unicode(rule.src))) if SAP_PREFIX in str(rule.out_interface): rule1_ = "iptables -D FORWARD -o {0} -j ACCEPT".format(rule.out_interface) p = Popen(shlex.split(rule1_)) p.communicate() info("delete FORWARD rule from SAP: {1} - {0} - {2}\n".format(rule.out_interface, rule.in_interface, src_CIDR)) if SAP_PREFIX in str(rule.in_interface): rule2_ = "iptables -D FORWARD -i {0} -j ACCEPT".format(rule.in_interface) p = Popen(shlex.split(rule2_)) p.communicate() info("delete FORWARD rule from SAP: {1} - {0} - {2}\n".format(rule.out_interface, rule.in_interface, src_CIDR)) info( "*** Cleanup complete.\n" )
def cleanup(cls): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/" "pings/noxes\n") zombies = ('controller ofprotocol ofdatapath ping nox_core' 'lt-nox_core ovs-openflowd ovs-controller' 'ovs-testcontroller udpbwtest mnexec ivs ryu-manager') # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh('killall ' + zombies + ' 2> /dev/null') time.sleep(1) sh('killall -9 ' + zombies + ' 2> /dev/null') # And kill off sudo mnexec sh('pkill -9 -f "sudo mnexec"') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old X11 tunnels\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").splitlines() for dp in dps: if dp: sh('dpctl deldp ' + dp) info("*** Removing OVS datapaths\n") dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh("ovs-vsctl " + " -- ".join("--if-exists del-br " + dp for dp in dps if dp)) # And in case the above didn't work... dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() for dp in dps: sh('ovs-vsctl del-br ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'").splitlines() # Delete blocks of links n = 1000 # chunk size for i in range(0, len(links), n): cmd = ';'.join('ip link del %s' % link for link in links[i:i + n]) sh('( %s ) 2> /dev/null' % cmd) if 'tap9' in sh('ip link show'): info("*** Removing tap9 - assuming it's from cluster edition\n") sh('ip link del tap9') info("*** Killing stale mininet node processes\n") killprocs('mininet:') info("*** Shutting down stale tunnels\n") killprocs('Tunnel=Ethernet') killprocs('.ssh/mn') sh('rm -f ~/.ssh/mn/*') # Call any additional cleanup code if necessary for callback in cls.callbacks: callback() info("*** Cleanup complete.\n")
def run(): " Creates the virtual environment, by starting the network and configuring debug information " info('** Creating an instance of Lab5 network topology\n') global net global hosts # We specify the OVSSwitch for better IPv6 performance # We use mininext constructor with the instance of the network, the default controller and the customized openvswitch net = Mininet(intf=TCIntf) info('\n** Adding Controller\n') net.addController('c0') info('\n** Adding Hosts\n') h1 = net.addHost('h1', ip='10.10.0.1/24', hostname='h1', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) # Space to add any commands for configuring the IP addresses h2 = net.addHost('h2', ip='10.10.0.2/24', hostname='h2', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) h3 = net.addHost('h3', ip='10.10.1.3/24', hostname='h3', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) r1 = net.addHost('r1', ip='10.10.0.10/24', hostname='r1', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) # # info('\n** Adding Switches\n') # Adding switches to the network sw1 = net.addSwitch('sw1') sw2 = net.addSwitch('sw2') info('\n** Creating Links \n') link_h1sw1 = net.addLink(h1, sw1) link_h2sw1 = net.addLink(h2, sw1) link_h3sw2 = net.addLink(h3, sw2) link_r1sw1 = net.addLink(r1, sw1, intfName1='r1-eth0') link_r1sw2 = net.addLink(r1, sw2, intfName1='r1-eth1') info('\n** Modifying Link Parameters \n') """ Default parameters for links: bw = None, delay = None, jitter = None, loss = None, disable_gro = True, speedup = 0, use_hfsc = False, use_tbf = False, latency_ms = None, enable_ecn = False, enable_red = False, max_queue_size = None """ link_r1sw2.intf1.config(bw=5, enable_red=True, enable_ecn=True) net.start() info('*** Configuring hosts\n') r1.cmd('ifconfig r1-eth1 10.10.1.10 netmask 255.255.255.0') r1.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward') # h1.cmd('ip route add 10.10.1.0/24 via 10.10.0.10') h2.cmd('ip route add 10.10.1.0/24 via 10.10.0.10') h3.cmd('ip route add 10.10.0.0/24 via 10.10.1.10') info('** Executing custom commands\n') output = net.nameToNode.keys #Enable Xterm window for every host info('** Enabling xterm for hosts only\n') # We check if the display is available hosts = [h1, h2, h3, r1] if 'DISPLAY' not in os.environ: error("Error starting terms: Cannot connect to display\n") return # Remove previous (and possible non-used) socat X11 tunnels cleanUpScreens() # Mininet's function to create Xterms in hosts makeTerms(hosts, 'host') # Enable the mininet> prompt info('** Running CLI\n') CLI(net) info('*** Closing the terminals on the hosts\n') h1.cmd("killall xterm") h2.cmd("killall xterm") h3.cmd("killall xterm") r1.cmd("killall xterm") # This command stops the simulation net.stop() cleanUpScreens()
def cleanup( cls): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info( "*** Removing excess controllers/ofprotocols/ofdatapaths/" "pings/noxes\n" ) zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh( 'killall ' + zombies + ' 2> /dev/null' ) time.sleep( 1 ) sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) # And kill off sudo mnexec sh( 'pkill -9 -f "sudo mnexec"') info( "*** Removing junk from /tmp\n" ) sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) info( "*** Removing old X11 tunnels\n" ) cleanUpScreens() info( "*** Removing excess kernel datapaths\n" ) dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).splitlines() for dp in dps: if dp: sh( 'dpctl deldp ' + dp ) info( "*** Removing WiFi module and Configurations\n" ) sh( 'rmmod mac80211_hwsim' ) sh( 'killall -9 hostapd' ) if(os.path.exists('ap.conf')): sh( 'rm ap.conf' ) info( "*** Removing OVS datapaths\n" ) dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp for dp in dps if dp ) ) # And in case the above didn't work... dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines() for dp in dps: sh( 'ovs-vsctl del-br ' + dp ) info( "*** Removing all links of the pattern foo-ethX\n" ) links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" ).splitlines() # Delete blocks of links n = 1000 # chunk size for i in xrange( 0, len( links ), n ): cmd = ';'.join( 'ip link del %s' % link for link in links[ i : i + n ] ) sh( '( %s ) 2> /dev/null' % cmd ) if 'tap9' in sh( 'ip link show' ): info( "*** Removing tap9 - assuming it's from cluster edition\n" ) sh( 'ip link del tap9' ) info( "*** Killing stale mininet node processes\n" ) killprocs( 'mininet:' ) info( "*** Shutting down stale tunnels\n" ) killprocs( 'Tunnel=Ethernet' ) killprocs( '.ssh/mn') sh( 'rm -f ~/.ssh/mn/*' ) # Call any additional cleanup code if necessary for callback in cls.callbacks: callback() info( "*** Cleanup complete.\n" )
def cleanup(cls): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/" "pings/noxes\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh('killall ' + zombies + ' 2> /dev/null') time.sleep(1) sh('killall -9 ' + zombies + ' 2> /dev/null') # And kill off sudo mnexec sh('pkill -9 -f "sudo mnexec"') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old X11 tunnels\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").splitlines() for dp in dps: if dp: sh('dpctl deldp ' + dp) info("*** Removing WiFi module and Configurations\n") try: (subprocess.check_output("lsmod | grep mac80211_hwsim", shell=True)) os.system('rmmod mac80211_hwsim') except: pass try: (subprocess.check_output("lsmod | grep ifb", shell=True)) os.system('rmmod ifb') except: pass try: h = sh('ps -aux | grep -ic hostpad') if h >= 2: sh('killall -9 hostapd') except: pass if glob.glob("*.apconf"): os.system('rm *.apconf') if glob.glob("*wifiDirect.conf"): os.system('rm *wifiDirect.conf') try: h = subprocess.check_output( "ps -aux | grep -ic \'wpa_supplicant -B -Dnl80211\'", shell=True) if h >= 2: os.system('pkill -f \'wpa_supplicant -B -Dnl80211\'') except: pass info("*** Removing OVS datapaths\n") dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh("ovs-vsctl " + " -- ".join("--if-exists del-br " + dp for dp in dps if dp)) # And in case the above didn't work... dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() for dp in dps: sh('ovs-vsctl del-br ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'").splitlines() # Delete blocks of links n = 1000 # chunk size for i in range(0, len(links), n): cmd = ';'.join('ip link del %s' % link for link in links[i:i + n]) sh('( %s ) 2> /dev/null' % cmd) if 'tap9' in sh('ip link show'): info("*** Removing tap9 - assuming it's from cluster edition\n") sh('ip link del tap9') info("*** Killing stale mininet node processes\n") killprocs('mininet:') info("*** Shutting down stale tunnels\n") killprocs('Tunnel=Ethernet') killprocs('.ssh/mn') sh('rm -f ~/.ssh/mn/*') info("*** Killing wmediumd\n") sh('pkill wmediumd') # Call any additional cleanup code if necessary for callback in cls.callbacks: callback() info("*** Cleanup complete.\n")
def run(): " Creates the virtual environment, by starting the network and configuring debug information " info('** Creating an instance of Lab5 network topology\n') global net global hosts net = Mininet(intf=TCIntf) info('\n** Adding Controller\n') net.addController('c0') info('\n** Adding Hosts\n') h1 = net.addHost('h1', ip='10.0.0.1/24', hostname='h1', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) h2 = net.addHost('h2', ip='10.0.0.2/24', hostname='h2', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) h3 = net.addHost('h3', ip='10.0.3.3/24', hostname='h3', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) r1 = net.addHost('r1', ip='10.0.0.10/24', hostname='r1', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) r2 = net.addHost('r2', ip='10.0.1.20/24', hostname='r2', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) r3 = net.addHost('r3', ip='10.0.2.30/24', hostname='r3', privateLogDir=True, privateRunDir=True, inMountNamespace=True, inPIDNamespace=True, inUTSNamespace=True) info('\n** Adding Switches\n') # Adding switches to the network sw1 = net.addSwitch('sw1') sw2 = net.addSwitch('sw2') sw3 = net.addSwitch('sw3') sw4 = net.addSwitch('sw4') info('\n** Creating Links \n') link_h1sw1 = net.addLink(h1, sw1) link_h2sw1 = net.addLink(h2, sw1) link_h3sw4 = net.addLink(h3, sw4) link_r1sw1 = net.addLink(r1, sw1, intfName1='r1-eth0') link_r1sw2 = net.addLink(r1, sw2, intfName1='r1-eth1') link_r2sw2 = net.addLink(r2, sw2, intfName1='r2-eth0') link_r2sw3 = net.addLink(r2, sw3, intfName1='r2-eth1') link_r3sw3 = net.addLink(r3, sw3, intfName1='r3-eth0') link_r3sw4 = net.addLink(r3, sw4, intfName1='r3-eth1') info('\n** Modifying Link Parameters \n') """ Default parameters for links: bw = None, delay = None, jitter = None, loss = None, disable_gro = True, speedup = 0, use_hfsc = False, use_tbf = False, latency_ms = None, enable_ecn = False, enable_red = False, max_queue_size = None """ net.start() info('*** Configuring hosts\n') r1.cmd('ifconfig r1-eth1 10.0.1.10 netmask 255.255.255.0') r1.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward') r2.cmd('ifconfig r2-eth1 10.0.2.20 netmask 255.255.255.0') r2.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward') r3.cmd('ifconfig r3-eth1 10.0.3.30 netmask 255.255.255.0') r3.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward') # Space to add any customize command before prompting command line # # # # r1.cmd( 'tc qdisc add dev r1-eth1 root handle 1: cbq avpkt 1000 bandwidth 60mbit' ) r1.cmd( 'tc class add dev r1-eth1 parent 1: classid 1:1 cbq rate 2mbit allot 1500 prio 5 bounded isolated' ) r1.cmd( 'tc class add dev r1-eth1 parent 1: classid 1:2 cbq rate 20mbit allot 1500 prio 5 bounded isolated' ) r1.cmd( 'tc filter add dev r1-eth1 protocol ip parent 1: prio 16 u32 match ip src 10.0.0.1 flowid 1:1' ) r1.cmd( 'tc filter add dev r1-eth1 protocol ip parent 1: prio 16 u32 match ip src 10.0.0.2 flowid 1:2' ) # Space to add any customize command before prompting command line # # # # info('** Executing custom commands\n') output = net.nameToNode.keys #Enable Xterm window for every host info('** Enabling xterm for hosts only\n') # We check if the display is available hosts = [h1, h2, h3, r1, r2, r3] if 'DISPLAY' not in os.environ: error("Error starting terms: Cannot connect to display\n") return # Remove previous 
(and possible non-used) socat X11 tunnels cleanUpScreens() # Mininet's function to create Xterms in hosts makeTerms(hosts, 'host') # Enable the mininet> prompt info('** Running CLI\n') CLI(net) r1.cmd("killall xterm") r2.cmd("killall xterm") r3.cmd("killall xterm") h1.cmd("killall xterm") h2.cmd("killall xterm") h3.cmd("killall xterm") # This command stops the simulation net.stop() cleanUpScreens()
def stop_xterm(self): for term in self.terms: os.kill(term.pid, signal.SIGKILL) cleanUpScreens()
def stopXterms(self): "Kill each xterm." for term in self.terms: os.kill(term.pid, signal.SIGKILL) cleanUpScreens()
def cleanup(cls): """Clean up junk which might be left over from old runs; do fast stuff before slow dp and link removal!""" info("*** Removing excess controllers/ofprotocols/ofdatapaths/" "pings/noxes\n") zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core ' zombies += 'ovs-openflowd ovs-controller udpbwtest mnexec ivs' # Note: real zombie processes can't actually be killed, since they # are already (un)dead. Then again, # you can't connect to them either, so they're mostly harmless. # Send SIGTERM first to give processes a chance to shutdown cleanly. sh('killall ' + zombies + ' 2> /dev/null') time.sleep(1) sh('killall -9 ' + zombies + ' 2> /dev/null') # And kill off sudo mnexec sh('pkill -9 -f "sudo mnexec"') info("*** Removing junk from /tmp\n") sh('rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log') info("*** Removing old X11 tunnels\n") cleanUpScreens() info("*** Removing excess kernel datapaths\n") dps = sh("ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'").splitlines() for dp in dps: if dp: sh('dpctl deldp ' + dp) info("*** Removing OVS datapaths\n") dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() if dps: sh("ovs-vsctl " + " -- ".join("--if-exists del-br " + dp for dp in dps if dp)) # And in case the above didn't work... dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() for dp in dps: sh('ovs-vsctl del-br ' + dp) info("*** Removing all links of the pattern foo-ethX\n") links = sh( "ip link show | " "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'").splitlines() # Delete blocks of links n = 1000 # chunk size for i in xrange(0, len(links), n): cmd = ';'.join('ip link del %s' % link for link in links[i:i + n]) sh('( %s ) 2> /dev/null' % cmd) if 'tap9' in sh('ip link show'): info("*** Removing tap9 - assuming it's from cluster edition\n") sh('ip link del tap9') info("*** Killing stale mininet node processes\n") killprocs('mininet:') info("*** Shutting down stale tunnels\n") killprocs('Tunnel=Ethernet') killprocs('.ssh/mn') sh('rm -f ~/.ssh/mn/*') # Call any additional cleanup code if necessary for callback in cls.callbacks: callback() # Containernet should also cleanup pending Docker cmd = "docker rm -f $( docker ps --filter 'label=com.containernet' -a -q)" call(cmd, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb')) # cleanup pending virtual machines if libvirt is available using virsh instead of libvirt-python # as no parameters are available if LIBVIRT_AVAILABLE: info("*** Removing pending LibvirtHosts\n") # TODO remember possible snapshots! cmd = "list --transient --title" for domain in virsh(cmd).splitlines(): match = re.search("^\s*[\d]+\s+(\S+).*com.containernet.*", domain) if match is not None: cmd = "destroy %s" % match.group(1) virsh(cmd) info( "*** Removing the default libvirt management network (mn.libvirt.mgmt)\n" ) cmd = "net-list --transient --name" for net in virsh(cmd).splitlines(): if "mn.libvirt.mgmt" in net: virsh("net-destroy mn.libvirt.mgmt") # cleanup any remaining iptables rules from external SAPs with NAT # we use iptc module to iterate through the loops, but due to a bug, we cannot use iptc to delete the rules # we rely on iptables CLI to delete the found rules info("*** Removing SAP NAT rules\n") table = iptc.Table(iptc.Table.NAT) chain = iptc.Chain(table, 'POSTROUTING') for rule in chain.rules: if SAP_PREFIX in str(rule.out_interface): src_CIDR = str(ipaddress.IPv4Network(unicode(rule.src))) rule0_ = "iptables -t nat -D POSTROUTING ! 
-o {0} -s {1} -j MASQUERADE".\ format(rule.out_interface.strip('!'), src_CIDR) p = Popen(shlex.split(rule0_)) p.communicate() info("delete NAT rule from SAP: {1} - {0} - {2}\n".format( rule.out_interface, rule.in_interface, src_CIDR)) table = iptc.Table(iptc.Table.FILTER) chain = iptc.Chain(table, 'FORWARD') for rule in chain.rules: src_CIDR = str(ipaddress.IPv4Network(unicode(rule.src))) if SAP_PREFIX in str(rule.out_interface): rule1_ = "iptables -D FORWARD -o {0} -j ACCEPT".format( rule.out_interface) p = Popen(shlex.split(rule1_)) p.communicate() info("delete FORWARD rule from SAP: {1} - {0} - {2}\n".format( rule.out_interface, rule.in_interface, src_CIDR)) if SAP_PREFIX in str(rule.in_interface): rule2_ = "iptables -D FORWARD -i {0} -j ACCEPT".format( rule.in_interface) p = Popen(shlex.split(rule2_)) p.communicate() info("delete FORWARD rule from SAP: {1} - {0} - {2}\n".format( rule.out_interface, rule.in_interface, src_CIDR)) info("*** Cleanup complete.\n")
def stop( self ): "Stop network." if self.net is not None: self.net.stop() cleanUpScreens() self.net = None
def myNetwork( with_windows=False ):

    net = Mininet( topo=None, build=False, ipBase='10.0.0.0/8' )

    info( '*** Adding controller\n' )
    c0 = net.addController( name='c0', controller=Controller,
                            protocol='tcp', port=6633 )

    info( '*** Add switches\n' )
    s1 = net.addSwitch( 's1', cls=OVSKernelSwitch )
    s2 = net.addSwitch( 's2', cls=OVSKernelSwitch )
    s3 = net.addSwitch( 's3', cls=OVSKernelSwitch )
    s4 = net.addSwitch( 's4', cls=OVSKernelSwitch )

    info( '*** Add hosts\n' )
    h1 = net.addHost( 'h1', cls=Host, ip='10.0.0.1', defaultRoute=None )
    hc1 = net.addHost( 'hc1', cls=Host, ip='10.0.0.5', defaultRoute=None )
    h2 = net.addHost( 'h2', cls=Host, ip='10.0.0.2', defaultRoute=None )
    h3 = net.addHost( 'h3', cls=Host, ip='10.0.0.3', defaultRoute=None )
    hc4 = net.addHost( 'hc4', cls=Host, ip='10.0.0.6', defaultRoute=None )
    h4 = net.addHost( 'h4', cls=Host, ip='10.0.0.4', defaultRoute=None )

    info( '*** Add links\n' )
    net.addLink( h1, s1 )
    net.addLink( s1, hc1 )
    net.addLink( s1, s2 )
    net.addLink( s2, h2 )
    net.addLink( s2, s3 )
    net.addLink( s3, h3 )
    net.addLink( s3, s4 )
    net.addLink( s4, hc4 )
    net.addLink( s4, h4 )

    info( '*** Starting network\n' )
    net.build()

    info( '*** Starting controllers\n' )
    for controller in net.controllers:
        controller.start()

    info( '*** Starting switches\n' )
    net.get( 's1' ).start( [ c0 ] )
    net.get( 's2' ).start( [ c0 ] )
    net.get( 's3' ).start( [ c0 ] )
    net.get( 's4' ).start( [ c0 ] )

    info( '*** Post configure switches and hosts\n' )
    net.pingAll()

    # Discover each host's MAC address (first 17 characters of the awk output)
    mac_h1 = h1.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    mac_hc1 = hc1.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    mac_h2 = h2.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    mac_h3 = h3.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    mac_hc4 = hc4.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    mac_h4 = h4.cmd( "ifconfig | grep HWaddr | awk '{print $5}'" )[ :17 ]
    print( "mac_h1(" + mac_h1 + ")" )
    print( "mac_hc1(" + mac_hc1 + ")" )
    print( "mac_h2(" + mac_h2 + ")" )
    print( "mac_h3(" + mac_h3 + ")" )
    print( "mac_hc4(" + mac_hc4 + ")" )
    print( "mac_h4(" + mac_h4 + ")" )

    ## h1-h4 -> hc1-h4 -> h4-hc1 -> hc1-h4 -> h1-h4
    ## h4-h1 <- hc4-h1 <- h1-hc4 <- hc4-h1 <- h4-h1

    # s1 (mac)
    flow_1_2_mac = ( 'priority=255,in_port=1,dl_src=' + mac_h1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:2' )
    flow_2_3_mac = ( 'priority=255,in_port=2,dl_src=' + mac_hc1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:3' )
    flow_3_2_mac = ( 'priority=255,in_port=3,dl_src=' + mac_hc4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:2' )
    flow_2_1_mac = ( 'priority=255,in_port=2,dl_src=' + mac_h4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:1' )
    ##???s1.dpctl('add-flow', flow_1_2_mac, '')
    s1.dpctl( 'add-flow', flow_2_3_mac, '' )
    s1.dpctl( 'add-flow', flow_3_2_mac, '' )
    s1.dpctl( 'add-flow', flow_2_1_mac, '' )

    # s2 (mac)
    flow_1_2_mac = ( 'priority=255,in_port=1,dl_src=' + mac_hc1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:2' )
    flow_2_3_mac = ( 'priority=255,in_port=2,dl_src=' + mac_h4 +
                     ',dl_dst=' + mac_hc1 + ',actions=output:3' )
    flow_3_2_mac = ( 'priority=255,in_port=3,dl_src=' + mac_h1 +
                     ',dl_dst=' + mac_hc4 + ',actions=output:2' )
    flow_2_1_mac = ( 'priority=255,in_port=2,dl_src=' + mac_hc4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:1' )
    s2.dpctl( 'add-flow', flow_1_2_mac, '' )
    s2.dpctl( 'add-flow', flow_2_3_mac, '' )
    s2.dpctl( 'add-flow', flow_3_2_mac, '' )
    s2.dpctl( 'add-flow', flow_2_1_mac, '' )

    # s3 (mac)
    flow_1_2_mac = ( 'priority=255,in_port=1,dl_src=' + mac_h4 +
                     ',dl_dst=' + mac_hc1 + ',actions=output:2' )
    flow_2_3_mac = ( 'priority=255,in_port=2,dl_src=' + mac_h1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:3' )
    flow_3_2_mac = ( 'priority=255,in_port=3,dl_src=' + mac_hc4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:2' )
    flow_2_1_mac = ( 'priority=255,in_port=2,dl_src=' + mac_h1 +
                     ',dl_dst=' + mac_hc4 + ',actions=output:1' )
    s3.dpctl( 'add-flow', flow_1_2_mac, '' )
    s3.dpctl( 'add-flow', flow_2_3_mac, '' )
    s3.dpctl( 'add-flow', flow_3_2_mac, '' )
    s3.dpctl( 'add-flow', flow_2_1_mac, '' )

    # s4 (mac)
    flow_1_2_mac = ( 'priority=255,in_port=1,dl_src=' + mac_hc1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:2' )
    flow_2_3_mac = ( 'priority=255,in_port=2,dl_src=' + mac_h1 +
                     ',dl_dst=' + mac_h4 + ',actions=output:3' )
    flow_3_2_mac = ( 'priority=255,in_port=3,dl_src=' + mac_h4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:2' )
    flow_2_1_mac = ( 'priority=255,in_port=2,dl_src=' + mac_hc4 +
                     ',dl_dst=' + mac_h1 + ',actions=output:1' )
    s4.dpctl( 'add-flow', flow_1_2_mac, '' )
    s4.dpctl( 'add-flow', flow_2_3_mac, '' )
    s4.dpctl( 'add-flow', flow_3_2_mac, '' )
    s4.dpctl( 'add-flow', flow_2_1_mac, '' )

    # s1, s2, s3, s4 (ip) -- note: only s1, s2 and s3 receive these IP rules below
    flow_1_2 = 'priority=255,ip,in_port=1,nw_src=10.0.0.1,nw_dst=10.0.0.4,actions=output:2'
    flow_2_3 = 'priority=255,ip,in_port=2,nw_src=10.0.0.1,nw_dst=10.0.0.4,actions=output:3'
    flow_3_2 = 'priority=255,ip,in_port=3,nw_src=10.0.0.4,nw_dst=10.0.0.1,actions=output:2'
    flow_2_1 = 'priority=255,ip,in_port=2,nw_src=10.0.0.4,nw_dst=10.0.0.1,actions=output:1'
    s1.dpctl( 'add-flow', flow_1_2, '' )
    s1.dpctl( 'add-flow', flow_2_3, '' )
    s1.dpctl( 'add-flow', flow_3_2, '' )
    s1.dpctl( 'add-flow', flow_2_1, '' )
    s2.dpctl( 'add-flow', flow_1_2, '' )
    s2.dpctl( 'add-flow', flow_2_3, '' )
    s2.dpctl( 'add-flow', flow_3_2, '' )
    s2.dpctl( 'add-flow', flow_2_1, '' )
    s3.dpctl( 'add-flow', flow_1_2, '' )
    s3.dpctl( 'add-flow', flow_2_3, '' )
    s3.dpctl( 'add-flow', flow_3_2, '' )
    s3.dpctl( 'add-flow', flow_2_1, '' )

    # ARP handling on the transit switches
    s2.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.1,actions=output:1', '' )
    s2.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.2,actions=output:2', '' )
    s2.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.3,actions=output:3', '' )
    s2.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.4,actions=output:3', '' )
    s3.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.1,actions=output:1', '' )
    s3.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.2,actions=output:1', '' )
    s3.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.3,actions=output:2', '' )
    s3.dpctl( 'add-flow', 'priority=65535,arp,arp_tpa=10.0.0.4,actions=output:3', '' )

    info( 'flows s1: ' + s1.dpctl( 'dump-flows', '', '' ) )
    info( 'flows s2: ' + s2.dpctl( 'dump-flows', '', '' ) )
    info( 'flows s3: ' + s3.dpctl( 'dump-flows', '', '' ) )
    info( 'flows s4: ' + s4.dpctl( 'dump-flows', '', '' ) )

    # net.pingAll()

    if with_windows:
        cleanUpScreens()
        execInXterm( [ h2, h3 ], './sf_hhe.sh; read' )
        execInXterm( [ hc1, hc4 ], './sf_eth_nsh.sh; read' )
        execInXterm( [ h4 ], './server.sh' )
        execInXterm( [ h4 ], './tcp_dump.sh' )
        execInXterm( [ h1, hc1, h2, h3, hc4, h4 ], './tcp_dump.sh' )
        execInXterm( [ h1 ], 'xterm' )
        ## wireshark
        cmd = 'wireshark'
        opts = '-i h1-eth0 -k'
        h1.cmd( cmd + ' ' + opts + '&' )
        opts = '-i hc1-eth0 -k'
        hc1.cmd( cmd + ' ' + opts + '&' )
        opts = '-i h2-eth0 -k'
        h2.cmd( cmd + ' ' + opts + '&' )
        opts = '-i h3-eth0 -k'
        h3.cmd( cmd + ' ' + opts + '&' )
        opts = '-i hc4-eth0 -k'
        hc4.cmd( cmd + ' ' + opts + '&' )
        opts = '-i h4-eth0 -k'
        h4.cmd( cmd + ' ' + opts + '&' )
        time.sleep( 1 )
    else:
        h4.sendCmd( './server.sh' )
        time.sleep( 1 )
        hc4.sendCmd( './sf_eth_nsh.sh' )
        time.sleep( 1 )
        h3.sendCmd( './sf_hhe.sh' )
        time.sleep( 1 )
        h2.sendCmd( './sf_hhe.sh' )
        time.sleep( 1 )
        hc1.sendCmd( './sf_eth_nsh.sh' )
        time.sleep( 1 )
        # h1.sendCmd('./tcp_dump.sh > h1_tcp_dump.txt')
        h1.cmd( './client.sh' )
        time.sleep( 1 )
        h1.sendCmd( 'python3 ../sf_hhe/HttpClient.py -ip 10.0.0.4' )
        output( h1.waitOutput() )

    if with_windows:
        execInXterm( [ h1 ], './client.sh ; read' )
        CLI( net )
        cleanUpScreens()
    else:
        h1.cleanup()
        hc1.cleanup()
        h2.cleanup()
        h3.cleanup()
        hc4.cleanup()
        h4.cleanup()

    net.stop()
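# A hedged sketch of the imports and entry point this script would need to run
# standalone. The module paths below are the standard Mininet ones for the
# names used in myNetwork(); execInXterm() and the *.sh helper scripts are
# assumed to be provided elsewhere by the original project.
#
# from mininet.net import Mininet
# from mininet.node import Controller, Host, OVSKernelSwitch
# from mininet.cli import CLI
# from mininet.log import setLogLevel, info, output
# from mininet.term import cleanUpScreens
# import time

if __name__ == '__main__':
    setLogLevel( 'info' )
    myNetwork( with_windows=True )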
def cleanup( cls ):
    """Clean up junk which might be left over from old runs;
       do fast stuff before slow dp and link removal!"""

    info( "*** Removing excess controllers/ofprotocols/ofdatapaths/"
          "pings/noxes\n" )
    zombies = "controller ofprotocol ofdatapath ping nox_core lt-nox_core "
    zombies += "ovs-openflowd ovs-controller udpbwtest mnexec ivs"
    # Note: real zombie processes can't actually be killed, since they
    # are already (un)dead. Then again,
    # you can't connect to them either, so they're mostly harmless.
    # Send SIGTERM first to give processes a chance to shutdown cleanly.
    sh( "killall " + zombies + " 2> /dev/null" )
    time.sleep( 1 )
    sh( "killall -9 " + zombies + " 2> /dev/null" )

    # And kill off sudo mnexec
    sh( 'pkill -9 -f "sudo mnexec"' )

    info( "*** Removing junk from /tmp\n" )
    sh( "rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log" )

    info( "*** Removing old X11 tunnels\n" )
    cleanUpScreens()

    info( "*** Removing excess kernel datapaths\n" )
    dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).splitlines()
    for dp in dps:
        if dp:
            sh( "dpctl deldp " + dp )

    info( "*** Removing OVS datapaths\n" )
    dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines()
    if dps:
        sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp
                                        for dp in dps if dp ) )
    # And in case the above didn't work...
    dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines()
    for dp in dps:
        sh( "ovs-vsctl del-br " + dp )

    info( "*** Removing all links of the pattern foo-ethX\n" )
    links = sh( "ip link show | "
                "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" ).splitlines()
    # Delete blocks of links
    n = 1000  # chunk size
    for i in xrange( 0, len( links ), n ):
        cmd = ";".join( "ip link del %s" % link
                        for link in links[ i : i + n ] )
        sh( "( %s ) 2> /dev/null" % cmd )

    if "tap9" in sh( "ip link show" ):
        info( "*** Removing tap9 - assuming it's from cluster edition\n" )
        sh( "ip link del tap9" )

    info( "*** Killing stale mininet node processes\n" )
    killprocs( "mininet:" )

    info( "*** Shutting down stale tunnels\n" )
    killprocs( "Tunnel=Ethernet" )
    killprocs( ".ssh/mn" )
    sh( "rm -f ~/.ssh/mn/*" )

    # Call any additional cleanup code if necessary
    for callback in cls.callbacks:
        callback()

    # Containernet should also cleanup pending Docker containers
    cmd = "docker rm -f $( docker ps --filter 'label=com.containernet' -a -q)"
    call( cmd, shell=True,
          stdout=open( os.devnull, "wb" ),
          stderr=open( os.devnull, "wb" ) )

    info( "*** Cleanup complete.\n" )
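# The loop over cls.callbacks above implies a registration hook on the owning
# class. A minimal sketch, assuming the class is named Cleanup and that its
# callbacks list is meant to be extended by callers; the helper below is
# hypothetical and not part of the excerpt.
def removeStaleCaptures():
    "Hypothetical extra cleanup step: drop capture files left by tcpdump runs."
    sh( 'rm -f /tmp/*.pcap' )

Cleanup.callbacks.append( removeStaleCaptures )
# A subsequent Cleanup.cleanup() call will now also invoke removeStaleCaptures().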