Example #1
def execInXterm(nodes, script):
    for node in nodes:
        makeTerm(node,
                 title=str(node) + ' ' + script,
                 term='xterm',
                 display=None,
                 cmd='bash -c ' + script)
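A minimal caller-side sketch, assuming the standard `from mininet.term import makeTerm` import: makeTerm returns a list of terminal processes, so a variant that lets Mininet close the xterms on net.stop() could collect them in net.terms (the quoting of `script` below is an assumption, not part of the example above).

from mininet.term import makeTerm

# hypothetical variant: track the spawned xterms so net.stop() can close them
def execInXtermTracked(net, nodes, script):
    for node in nodes:
        net.terms += makeTerm(node,
                              title='%s %s' % (node, script),
                              cmd="bash -c '%s'" % script)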
Example #2
def cs43net():
    "Create a simple network for cs43"
    stophttp()
    r = get_ip_setting()
    if r == -1:
        exit("Couldn't load config file for ip addresses, check whether %s exists" % IPCONFIG_FILE)
    else:
        info( '*** Successfully loaded ip settings for hosts\n %s\n' % IP_SETTING)

    topo = CS43Topo()
    info( '*** Creating network\n' )
    net = Mininet( topo=topo, controller=RemoteController, ipBase=IPBASE )
    net.start()
    server1, server2, client, router = net.get( 'server1', 'server2', 'client', 'sw0')
    s1intf = server1.defaultIntf()
    s1intf.setIP('%s/8' % IP_SETTING['server1'])
    s2intf = server2.defaultIntf()
    s2intf.setIP('%s/8' % IP_SETTING['server2'])
    clintf = client.defaultIntf()
    clintf.setIP('%s/8' % IP_SETTING['client'])


    for host in server1, server2, client:
        set_default_route(host)
    starthttp( server1 )
    starthttp( server2 )
    makeTerm(client)
    CLI( net )
    stophttp()
    net.stop()
Example #3
def createTopo():
    logging.debug("Create FatTopo")
    global topo
    topo = FatTopo()
    topo.createTopo()
    topo.createLink()

    logging.debug("Start Mininet")
    CONTROLLER_IP = "127.0.0.1"
    CONTROLLER_PORT = 6633
    global net
    net = Mininet(topo=topo, link=TCLink, controller=None)
    net.addController('controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT)
    net.start()

    logging.debug("dumpNode")
    # enableSTP()
    dumpNodeConnections(net.hosts)

    # pingTest(net)
    # iperfTest(net, topo)
    root = net.get(topo.HostList[0])
    makeTerm(root)
    # root.cmd('java -jar ../ncintents-bridge.jar 9000 &')

    run(host='172.20.4.112', port=8090)
    CLI(net)
    net.stop()
Example #4
    def run(self):
        '''Run the lab 3 simulation environment'''

        localJitter = 10  # jitter in ms: the variation of the delay between two consecutive packets
        # We create the topology
        topology = Lab3Topology(nbOfServersPerRegion, nbOfClientsPerRegion, nbOfRegions)
        # We create the simulation
        # Set the topology and the classes for links and interfaces; clean up any previous Mininet state and build the topology now
        simulation = Mininet(topo = topology, link = TCLink, intf = TCIntf, cleanup = True, build = True, ipBase='10.1.0.0/24')
        # We connect the network to Internet
        simulation.addNAT().configDefault()
        # We can start the simulation
        print "Starting the simulation..."
        simulation.start()
        # For each host
        for host in simulation.hosts:
            # We set the jitter (It can only be done after the simulation was started, not from the Topology)
            host.defaultIntf().config(jitter = ("%dms" % localJitter))
        # for each server
        for server in simulation.hosts:
            if "vessel" in server.name:
                # We open a xterm and start the server
                self.startServer(server)
        makeTerm(node=simulation.getNodeByName("client1"), cmd="firefox")
        # We also start the Command Line Interface of Mininet
        CLI(simulation)
        # Once the CLI is closed (with exit), we can stop the simulation
        print "Stopping the simulation NOW!"
        # We close the xterms (mininet.term.cleanUpScreens)
        cleanUpScreens()
        simulation.stop()
Example #5
def createTopo():
    logging.debug("Create FatTopo")
    global topo
    topo = FatTopo()
    topo.createTopo()
    topo.createLink()

    logging.debug("Start Mininet")
    CONTROLLER_IP = "127.0.0.1"
    CONTROLLER_PORT = 6633
    global net
    net = Mininet(topo=topo, link=TCLink, controller=None)
    net.addController('controller',
                      controller=RemoteController,
                      ip=CONTROLLER_IP,
                      port=CONTROLLER_PORT)
    net.start()

    logging.debug("dumpNode")
    # enableSTP()
    dumpNodeConnections(net.hosts)

    # pingTest(net)
    # iperfTest(net, topo)
    root = net.get(topo.HostList[0])
    makeTerm(root)
    # root.cmd('java -jar ../ncintents-bridge.jar 9000 &')

    run(host='172.20.4.112', port=8090)
    CLI(net)
    net.stop()
Example #6
 def startServer(self, server):
     # Call mininet.term.makeTerm
     makeTerm(node=server,
              cmd="python {} --id {} --vessels {}".format(
                  self.pathToServer,
                  server.IP().replace('10.1.0.', ''),
                  self.nbOfServersPerRegion * self.nbOfRegions))
Example #7
def start_bwm(node, filename=None):
    from mininet.term import makeTerm

    cmd = ["bwm-ng", "-u", "bits"]
    if filename:
        return node.popen(cmd + ["-o", "csv", "-F", filename, "-T", "sum"])
    else:
        makeTerm(node, cmd="bash -c '%s || read'" % " ".join(cmd))
        return None
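A minimal usage sketch, assuming a Mininet host object `h1` and that bwm-ng is installed: with a filename the function returns a Popen handle that the caller should terminate once the measurement is done; without one it only opens an xterm and returns None.

# hypothetical usage of start_bwm (h1 and the path are illustrative)
proc = start_bwm(h1, filename='/tmp/h1_bw.csv')  # logs bandwidth as CSV in the background
# ... run the experiment ...
if proc is not None:
    proc.terminate()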
Example #8
def run_adaptive_redundancy(host_num, coder_log_conf):
    """Run the network application for the multi-hop topology

    :param host_num (int): Number of hosts
    :param coder_log_conf (dict): Configs for the logs of the coders
    """

    net = Containernet(controller=RemoteController,
                       link=TCLink,
                       autoStaticArp=True)
    mgr = VNFManager(net)
    hosts = create_topology(net, host_num)

    try:
        info("*** Starting network\n")
        net.start()
        # MARK: Use static ARP to avoid ping losses
        # info("*** Ping all to update ARP tables of each host\n")
        # net.pingAll()
        info("*** Adding OpenFlow rules\n")
        add_ovs_flows(net, host_num)
        info("*** Disable Checksum offloading\n")
        disable_cksum_offload(host_num)

        info("*** Deploy coders\n")
        coders = deploy_coders(mgr, hosts)
        # Wait for coders to be ready

        info("*** Starting Ryu controller\n")
        c0 = net.get("c0")
        makeTerm(c0, cmd="ryu-manager adaptive_rlnc_sdn_controller.py ; read")

        # s2 = net.get('s2')
        # makeTerm(s2, cmd="watch -n 1 ovs-ofctl dump-flows s2")

        # s4 = net.get('s4')
        # makeTerm(s4, cmd="watch -n 1 ovs-ofctl dump-flows s4")

        time.sleep(3)

        info("*** Run Iperf\n")
        run_iperf_test(hosts[0], hosts[-1], "udp", 30)
        print_coders_log(coders, coder_log_conf)
        remove_coders(mgr, coders)

        info("*** Emulation stops...\n")

    except Exception as e:
        error("*** Emulation has errors:")
        error(e)
    finally:
        info("*** Stopping network\n")
        net.stop()
        mgr.stop()
Example #9
def add_table_entries(s1, s2, cmd1):
    table = '../utils/table.txt'
    info(
        '*** Adding data to tables, please wait until s1 and s2 xterms get closed.\n'
    )
    makeTerm(s1,
             title='s1',
             cmd="bash -c 'echo \"adding table entries...\n"
             "please wait for this terminal to close. \" &&"
             " {} 50001 < {} >/dev/null 2>&1;'".format(cmd1, table))
    makeTerm(s2,
             title='s2',
             cmd="bash -c 'echo \"adding table entries...\n"
             "please wait for this terminal to close. \" &&"
             " {} 50002 < {} >/dev/null 2>&1;'".format(cmd1, table))
Example #10
    def charge(self, in_xterm=False, intf=None):
        """Start the charging process.
        :param in_xterm: True to run the charge inside an xterm instance. Default: False.
        :param intf: the interface on which to search for an EVSE. Default: None, use the default interface. Has permanent effect.
        """

        print("*** Looking for EVSE and start charging...")

        # setting the interface (default: default interface)
        if intf is not None:
            self.setIntf(intf)
        elif self.getProperties()['network.interface'] == "":
            print("* No intf selected, using default interface.")
            self.setIntf(self.intf().name)

        if in_xterm:
            # run inside an xterm. You must append the return value to net.terms so the terminal is closed on exit.
            command = "cd ./{}; java -jar rise-v2g-evcc-*.jar; bash -i".format(
                self.folder)
            # this returns a list of just one xterm, so [0] is needed
            self.proc = makeTerm(self,
                                 cmd="bash -i -c '{}'".format(command))[0]
            return self.proc
        else:
            self.proc = self.popen(
                "cd ./{}; java -jar rise-v2g-evcc-*.jar".format(self.folder),
                shell=True)
            # print the stdout to the CLI at the end of the charging process
            proc_stdout = self.proc.communicate()[0].strip()
            print(proc_stdout)
Example #11
    def startCharge(self, in_xterm=True, intf=None):
        """
        Spawn an xterm and start the listening phase in it.
        It is not possible to launch it without an xterm because otherwise it sometimes crashes randomly.
        :param intf: Interface on which to listen for charging requests. If None, the default is used. Has permanent effect.
        :returns A popen xterm instance. To be appended to "net.terms" to ensure a correct close on exit."""

        print("*** Starting waiting for EVs...")

        # setting the interface (default: default interface)
        if intf is not None:
            self.setIntf(intf)
        elif self.getProperties()['network.interface'] == "":
            print("* No intf selected, using default interface.")
            self.setIntf(self.intf().name)

        if in_xterm:
            # run inside an xterm. You must append the return value to net.terms so the terminal is closed on exit.
            command = "cd ./{}; java -jar rise-v2g-secc-*.jar; bash -i".format(
                self.folder)
            # this returns a list of just one xterm, so [0] is needed
            self.proc = makeTerm(self,
                                 cmd="bash -i -c '{}'".format(command))[0]
        else:
            self.proc = self.popen(
                "cd ./{}; java -jar rise-v2g-secc-*.jar".format(self.folder),
                shell=True)
            # print the stdout to the CLI at the start of the charging process
            proc_stdout = self.proc.communicate(timeout=15)[0].strip()
            print(proc_stdout)
        return self.proc
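A minimal caller-side sketch of what the docstring above asks for, assuming a Mininet instance `net` and a node `evse` exposing startCharge (both names are illustrative): the returned xterm process is appended to net.terms so it is closed on net.stop().

# hypothetical usage: track the returned xterm so net.stop() closes it
term_proc = evse.startCharge(in_xterm=True)
net.terms.append(term_proc)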
Example #12
 def do_vm_xterm(self, line):
     parts = line.split()
     if len(parts) == 2:
         c = self.mn[parts[0]]
         cmd = 'bash --init-file <(echo " exec ~/mininet/util/m %s-%s")' % (parts[0], parts[1])
         title = 'VM: %s / Node' % parts[1]
         self.mn.terms += makeTerm(c, cmd = cmd, title=title)
     else:
         error('Only two arguments to vm_xterm: vm_xterm c1 1-h1\n')
Example #13
def run():

    num_hosts = 1 + ARGS.servers
    if ARGS.thrift_tcp or ARGS.thrift_udp or ARGS.thrift_ddc:
        num_hosts += 2
    privateDirs = [('./tmp/config', '/tmp/%(name)s/var/config')]
    host = partial(Host, privateDirs=privateDirs)
    if ARGS.is_p4:
        switch_model = 'p4/p4_switch/simple_switch/simple_switch'
        json_router = 'p4/p4_switch/ip6_16.json'
        topo = BlueBridgeTopo(sw_path=switch_model,
                              json_path=json_router,
                              thrift_port=9090,
                              pcap_dump=False,
                              num_hosts=num_hosts)
        heimdall = RemoteController('c', '0.0.0.0', 6633)
        net = Mininet(topo=topo,
                      host=host,
                      switch=P4Switch,
                      controller=heimdall)
    else:
        topo = BlueBridgeTopo(num_hosts=num_hosts)
        net = Mininet(topo=topo, host=host, build=False, controller=None)
        net.build()
    net.start()
    directories = [
        directory[0] if isinstance(directory, tuple) else directory
        for directory in privateDirs
    ]
    info('Private Directories:', directories, '\n')

    # Configure our current "switch"
    configureSwitch(num_hosts)
    # Configure routing and generate the bluebridge settings
    configureHosts(net, num_hosts)
    # net.startTerms()

    makeTerm(net.hosts[0])  # The client
    # makeTerm(net.hosts[1]) # Thrift remote_mem server
    # makeTerm(net.hosts[2]) # Thrift simple_arr_comp server

    CLI(net)
    net.stop()
    clean()
Example #14
def topology(attack):
    "Create a network."
    net = Mininet_wifi()

    info("*** Creating nodes\n")
    sta1 = net.addStation('sta1', encrypt='wpa2')
    sta2 = net.addStation('sta2', encrypt='wpa2')
    ap1 = net.addAccessPoint('ap1',
                             ssid="simplewifi",
                             mode="g",
                             channel="1",
                             passwd='123456789a',
                             encrypt='wpa2',
                             failMode="standalone",
                             datapath='user',
                             wps_state='2',
                             config_methods='label display push_button keypad')

    info("*** Configuring wifi nodes\n")
    net.configureWifiNodes()

    info("*** Associating Stations\n")
    net.addLink(sta1, ap1)
    net.addLink(sta2, ap1)

    info("*** Starting network\n")
    net.build()
    ap1.start([])

    if attack:
        ap1.cmd('hostapd_cli -i ap1-wlan1 wps_ap_pin set 12345670')
        sta1.cmd('iw dev sta1-wlan0 interface add mon0 type monitor')
        sta1.cmd('ip link set mon0 up')
        makeTerm(sta1)  #reaver -i mon0 -b 02:00:00:00:02:00 -vv
    else:
        ap1.cmd('hostapd_cli -i ap1-wlan1 wps_pin any 12345670')
        sta1.cmd('wpa_cli -i sta1-wlan0 wps_pin 02:00:00:00:02:00 12345670')
        sta2.cmd('wpa_cli -i sta2-wlan0 wps_pin 02:00:00:00:02:00 12345670')

    info("*** Running CLI\n")
    CLI(net)

    info("*** Stopping network\n")
    net.stop()
Example #15
 def xterm(self, _ignore=None):
     "Make an xterm when a button is pressed."
     if (self.selection is None or self.net is None
             or self.selection not in self.itemToWidget):
         return
     name = self.itemToWidget[self.selection]['text']
     if name not in self.net.nameToNode:
         return
     term = makeTerm(self.net.nameToNode[name], 'Host')
     self.net.terms.append(term)
Example #16
 def do_vm_xterm(self, line):
     parts = line.split()
     if len(parts) == 2:
         c = self.mn[parts[0]]
         cmd = 'bash --init-file <(echo " exec ~/mininet/util/m %s-%s")' % (
             parts[0], parts[1])
         title = 'VM: %s / Node' % parts[1]
         self.mn.terms += makeTerm(c, cmd=cmd, title=title)
     else:
         error('Only two arguments to vm_xterm: vm_xterm c1 1-h1\n')
Example #17
 def xterm( self, _ignore=None ):
     "Make an xterm when a button is pressed."
     if ( self.selection is None or
          self.net is None or
          self.selection not in self.itemToWidget ):
         return
     name = self.itemToWidget[ self.selection ][ 'text' ]
     if name not in self.net.nameToNode:
         return
     term = makeTerm( self.net.nameToNode[ name ], 'Host' )
     self.net.terms.append( term )
Example #18
    def run_exercise(self):
        """ Sets up the mininet instance, programs the switches,
            and starts the mininet CLI. This is the main method to run after
            initializing the object.
        """
        # Initialize mininet with the topology specified by the config
        self.create_network()
        self.net.plotGraph(max_x=700, max_y=700)
        self.config_mobility()
        self.net.start()
        sleep(1)

        # some programming that must happen after the net has started
        if self.net.stations:
            self.program_stations()
        if self.net.hosts:
            self.program_hosts()
        if self.net.aps:
            self.program_aps()
        if self.net.switches:
            self.program_switches()

        # wait for that to finish. Not sure how to do this better
        sleep(1)

        makeTerm(self.net.aps[1], cmd="bash -c 'python send.py ap2;'")
        makeTerm(self.net.hosts[1], cmd="bash -c 'python receive.py;'")
        makeTerm(self.net.stations[0], cmd="bash -c 'ping 10.0.2.2;'")

        self.do_net_cli()
        # stop right after the CLI is exited

        os.system('pkill -f \"xterm -title\"')
        self.net.stop()
Example #19
def main():
    nb_hosts, nb_switches, links = read_topo()

    topo = MyTopo(args.behavioral_exe,
                  args.json,
                  nb_hosts, nb_switches, links)

    net = Mininet(topo = topo,
                  host = P4Host,
                  switch = P4Switch,
                  autoStaticArp = True,
                  controller = None,
                  autoSetMacs = True)
    net.start()

    for n in xrange(nb_hosts):
        h = net.get('h%d' % (n + 1))
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload eth0 %s off" % off
            print cmd
            h.cmd(cmd)
        print "disable ipv6"
        h.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv4.tcp_congestion_control=reno")
        h.cmd("iptables -I OUTPUT -p icmp --icmp-type destination-unreachable -j DROP")
        h.cmd("sh h%d.vlan" % (n + 1)) # Create interface eth0.16 and add static ARP entries

    sleep(1)

    for i in xrange(nb_switches):
        # Create mirror_id j to clone packets towards j-th port
        for j in range(len(topo.ports['s'+str(i+1)])):
            cmd = ['echo "mirroring_add',str(j+1),str(j+1),'" | ~/bmv2/targets/simple_switch/sswitch_CLI',args.json, str(_THRIFT_BASE_PORT + i)]
            print " ".join(cmd)
            subprocess.call(" ".join(cmd), shell=True)
        
        # added "--pre SimplePreLAG" refer to https://github.com/p4lang/behavioral-model
        cmd = [args.cli, "--json", args.json,
               "--thrift-port", str(_THRIFT_BASE_PORT + i), "--pre", "SimplePreLAG"]
        with open("commands_%d.txt" % (i+1), "r") as f:
            print " ".join(cmd)
            try:
                output = subprocess.check_output(cmd, stdin = f)
                print output
            except subprocess.CalledProcessError as e:
                print e
                print e.output

    sleep(1)

    print "Ready !"

    makeTerm(net['s1'],title="Detect and Redirect node - Primary path/Detected link",cmd="tcpdump -n -i s1-eth2 -Uw - | tcpdump -en -r - vlan;echo;echo;echo Last command: \x1B[32m'tcpdump -n -i s1-eth2 -Uw - | tcpdump -en -r - vlan'\x1B[0m; bash")
    makeTerm(net['s1'],title="Detect and Redirect node - Backup path",cmd="tcpdump -n -i s1-eth3 -Uw - | tcpdump -en -r - vlan;echo;echo;echo Last command: \x1B[32m'tcpdump -n -i s1-eth3 -Uw - | tcpdump -en -r - vlan'\x1B[0m; bash")
    makeTerm(net['h1'],title="Host H1",cmd="ping 192.168.100.2;echo;echo;echo Last command: \x1B[32mping 192.168.100.2 -i 1\x1B[0m; bash")

    CLI( net )
    net.stop()
Example #20
def startPig(net, xterm=False):
    subprocess.call(VETH_CMD, shell=True)
    snort1 = net.get('snort1')
    interfaces = snort1.intfNames()
    snort1.cmd("brctl addbr snort_bridge")
    for intfs in interfaces:
        info('*** Adding interface ' + intfs + ' to snort_bridge' + '\n')
        snort1.cmd("brctl addif snort_bridge " + intfs)
    snort1.cmd("ifconfig snort_bridge up")

    Intf('veth1', node=snort1)

    info('*** Starting PIG\n')
    if not xterm:
        snort1.cmd('ifconfig veth1 192.168.100.2; ' + PIG_CMD +
                   ' >> /dev/null 2>&1 &')
    else:
        net.terms += makeTerm(snort1, 'tmp')
        time.sleep(2)
        net.terms += makeTerm(snort1,
                              'Pig Relay',
                              cmd="bash startPigrelay.sh '" + PIG_DIR + "'")
Example #21
def simpleTest():
    "Create and test a simple network"
    topo = TreeTopo(depth=1, fanout=2)
    net = Mininet(topo, controller=RemoteController)
    net.start()
    print "Dumping host connections"
    dumpNodeConnections(net.hosts)
    print "Testing network connectivity"

    #ping_all_cmd = "fping -t 10 -l -p 5000 " + " ".join([host.IP() for host in net.hosts])+" > /tmp/%s_logs.txt &"
    #for host in net.hosts:
    #    host.cmd(ping_all_cmd%host.name)
    #print(dir(host))

    for host in net.hosts:
        term.makeTerm(host)

    while True:
        net.ping(timeout=20)
        time.sleep(6)
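    # note: the loop above never exits on its own, so net.stop() below is unreachable as written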

    net.stop()
Example #22
def startall( self, line ):
	"startall is an example command to extend the Mininet CLI"
	net = self.mn
	for h in net.hosts:
		if h.name == "h1" or random.random() < 0.8:
			print "Start java in " , h
			shortName = h.name
			cmdLine = 'echo -ne "\033]0;' + shortName[1:] + '\007";'
			cmdLine += "java -jar ~/dev/A1.jar | tee /tmp/" + h.name + ".log"
			print "  Calling command: " , cmdLine
			# Running in foreground
			net.terms += makeTerm(h, cmd=cmdLine)
			# Running in background
			# h.sendCmd(cmdLine)
			time.sleep(2)
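For context, a sketch of how such a command is usually wired into the Mininet CLI; the assignment below follows the standard do_* pattern of cmd.Cmd and is an assumption about the surrounding script, not part of the example above.

from mininet.cli import CLI

# hypothetical registration: cmd.Cmd dispatches "startall" at the mininet> prompt
# to a do_startall attribute found on the CLI class
CLI.do_startall = startall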
Example #23
    def do_net_cli(self, no_term):
        """ Starts up the mininet CLI and prints some helpful output.

            Assumes:
                - A mininet instance is stored as self.net and self.net.start() has
                  been called.
        """
        for s in self.net.switches:
            s.describe()
        for h in self.net.hosts:
            h.describe()
        self.logger("Starting mininet CLI")
        # Generate a message that will be printed by the Mininet CLI to make
        # interacting with the simple switch a little easier.
        print('')
        print(
            '======================================================================'
        )
        print('Welcome to the BMV2 Mininet CLI!')
        print(
            '======================================================================'
        )
        print('Your P4 program is installed into the BMV2 software switch')
        print('and your initial configuration is loaded. You can interact')
        print('with the network using the mininet CLI below.')
        print('')
        # if self.switch_json:
        #     print('To inspect or change the switch configuration, connect to')
        #     print('its CLI from your host operating system using this command:')
        #     print('  simple_switch_CLI --thrift-port <switch thrift port>')
        #     print('')
        print('To view a switch log, run this command from your host OS:')
        print('  tail -f %s/<switchname>.log' % self.log_dir)
        print('')
        print('To view the switch pcaps, check the pcap files in %s:' %
              self.pcap_dir)
        print(' for example run:  sudo tcpdump -xxx -r s1-eth1.pcap')
        print('')
        print('To view the host pcaps, check the pcap files in %s:' %
              self.pcap_dir)
        print(' for example run:  sudo tcpdump -xxx -r h2_in.pcap')
        print('')

        if not no_term:
            for node in self.net.hosts:
                self.net.terms += makeTerm(node, term="xterm")

        CLI(self.net)
Example #24
def createCustomTopology():

    net = Mininet(controller = RemoteController)

    info( '*** Adding controllers\n' )
    #cA = RemoteController('cA', ip="127.0.0.1", port=6633)
    
    cA = net.addController('cA', controller=RemoteController, ip = "127.0.0.1", port = 6633)

    info( '*** Adding hosts\n' )
    h1 = net.addHost('h1', ip='10.1.1.1', mac='0A:0A:00:00:00:01')
    h2 = net.addHost('h2', ip='10.1.1.2', mac='0A:0A:00:00:00:02')
    h3 = net.addHost('h3', ip='10.1.2.1', mac='0A:0B:00:00:00:01')
    h4 = net.addHost('h4', ip='10.1.2.2', mac='0A:0B:00:00:00:02')

    info( '*** Adding switches\n' )
    s1 = net.addSwitch( 's1', dpid='0000000000000001'  )     #Add dpid as a string containing the 16-character (zero-padded) hex equivalent of the int dpid
    s11 = net.addSwitch( 's11', dpid='000000000000000b' )
    s12 = net.addSwitch( 's12', dpid='000000000000000c' )
    
    
    info( '*** Adding links\n' )
    net.addLink(h1,s11)
    net.addLink(h2,s11)
    
    net.addLink(h3,s12)
    net.addLink(h4,s12)
    
    
    net.addLink(s11,s1)
    net.addLink(s12,s1)
    

    info('*** Starting network\n')
    net.build()
    s1.start([cA])
    s11.start([cA])
    s12.start([cA])
    net.terms += makeTerm(h1)
    #os.system('ryu-manager ryu.app.simple_switch_13 &')
    #time.sleep(10)
    net.pingAll()
    
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network\n')
    net.stop()
Example #25
    def start_decoder(self, in_xterm=True):
        """Start the decoder.
        :param in_xterm: True to run the decoder inside an xterm instance. Default: True."""

        print("*** Starting the decoder...")

        if in_xterm:
            # run inside an xterm. You must append the return value to net.terms so the terminal is closed on exit.
            command = "cd ./{}; java -jar V2Gdecoder.jar -w; bash -i".format(
                self.folder)
            # this returns a list of just one xterm, so [0] is needed
            self.proc = makeTerm(self,
                                 cmd="bash -i -c '{}'".format(command))[0]
            return self.proc
        else:
            self.cmd(
                "cd ./{}; java -jar V2Gdecoder.jar -w".format(self.folder),
                "2>/dev/null 1>/dev/null &")
Example #26
    def start_server(self, dos_attack_1=False, dos_attack_2=False):
        in_xterm = True

        print("*** Starting the MiM server...")
        if in_xterm:
            # run inside an xterm. You must append the return value to net.terms so the terminal is closed on exit.
            if dos_attack_1:
                command = "python3 v2g_mim_server.py -d; bash -i".format(
                    self.folder)
            elif dos_attack_2:
                command = "python3 v2g_mim_server.py -c; bash -i".format(
                    self.folder)
            else:
                command = "python3 v2g_mim_server.py; bash -i".format(
                    self.folder)
            # this returns a list of just one xterm, so [0] is needed
            self.proc = makeTerm(self,
                                 cmd="bash -i -c '{}'".format(command))[0]
            return self.proc
Example #27
def start_mininet():
    controller = RemoteController('c0', ip='127.0.0.1', port=6633)

    topo = DotTopo()
    topo.import_dot('../res/topology.dot')

    net = Mininet(topo=topo, controller=controller, link=TCLink)

    controller.start()
    net.start()

    # while True:
    random_host1 = net.get('h%d' % randint(1, 160))
    random_host2 = net.get('h%d' % randint(1, 160))

    makeTerm(random_host1, title='Iperf Server', cmd='iperf -s')
    makeTerm(random_host2,
             title='Iperf Client',
             cmd='iperf -c ' + random_host1.IP())

    file = open(config.TC_COMMANDS_PATH)

    start_time = time.time()
    rules_tc = json.loads(file.read())
    for rule_ip in rules_tc:
        print(rule_ip)
        for i in range(1, 160):
            host = net.get('h{}'.format(i))
            if host.IP() == rule_ip:
                print(rule_ip)
                break
        for rule in rules_tc:
            makeTerm(host, title='Commands', cmd=rule)

    end_time = time.time()
    elapsed_time = end_time - start_time
    return elapsed_time
Example #28
def myNet():

    ctl = '192.168.1.102'
    net = Mininet(topo=None, link=TCLink, build=False)

    # Create nodes
    h1 = net.addHost('h1', mac='01:00:00:00:01:00', ip='192.168.0.1/24')
    h2 = net.addHost('h2', mac='01:00:00:00:02:00', ip='192.168.0.2/24')
    h3 = net.addHost('h3', mac='01:00:00:00:03:00', ip='192.168.0.3/24')

    # Create switches
    s1 = net.addSwitch('s1', listenPort=6634, dpid='0000000000000010')
    s2 = net.addSwitch('s2', listenPort=6634, dpid='0000000000000020')
    s3 = net.addSwitch('s3', listenPort=6634, dpid='0000000000000030')

    print "*** Creating links"
    net.addLink(
        h1,
        s1,
    )
    net.addLink(
        h2,
        s2,
    )
    net.addLink(
        h3,
        s3,
    )
    net.addLink(
        s1,
        s2,
    )
    net.addLink(
        s1,
        s3,
    )

    # Add Controllers
    ctrl = net.addController('c1',
                             controller=RemoteController,
                             ip=ctl,
                             port=6633)

    net.build()

    # Connect each switch to a different controller
    s1.start([ctrl])
    s2.start([ctrl])
    s3.start([ctrl])

    print "Testing network connectivity\n"
    net.pingAll()
    print "Dumping host connections\n"
    dumpNodeConnections(net.hosts)

    h1, h2, h3, s1, s2, s3 = net.getNodeByName('h1', 'h2', 'h3', 's1', 's2',
                                               's3')

    s1.cmdPrint('ovs-vsctl show')

    h1.cmd('/sbin/tc qdisc del dev h1-eth0 root')
    sleep(3)
    h1.cmd('ifconfig h1-eth0 txqueuelen 10000')
    sleep(2)
    # h1.cmd('/sbin/tc qdisc add dev h1-eth0 root handle 1:0 htb default 20 && '
    #        '/sbin/tc class add dev h1-eth0 parent 1:0 classid 1:1 htb rate 250kbps ceil 250kbps && '
    #        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:10 htb rate 240kbps ceil 240kbps && '
    #        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:20 htb rate 10kbps ceil 10kbps && '
    #        '/sbin/tc class add dev h1-eth0 parent 10:1 handle 2: prio bands 5 && '
    #        '/sbin/tc class add dev h1-eth0 parent 2:1 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 2:2 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 2:3 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 2:4 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 2:5 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 20:1 handle 3: prio bands 5 && '
    #        '/sbin/tc class add dev h1-eth0 parent 3:1 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 3:2 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 3:3 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 3:4 pfifo && '
    #        '/sbin/tc class add dev h1-eth0 parent 3:5 pfifo && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x78 0xff flowid 2:1 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x58 0xff flowid 2:2 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x40 0xff flowid 2:3 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x38 0xff flowid 2:4 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x00 0xff flowid 2:5 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.2 ip tos 0x78 0xff flowid 3:1 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x58 0xff flowid 3:2 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x40 0xff flowid 3:3 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x38 0xff flowid 3:4 && '
    #        '/sbin/tc filter add dev h1-eth0 protocol ip parent 2:0 prio 1 u32 match ip dst 192.168.0.3 ip tos 0x00 0xff flowid 3:5')

    # h1.cmd('/sbin/tc qdisc add dev h1-eth0 root handle 1: prio bands 5 && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:1 handle 10: htb rate 250kbps ceil 250kbps && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:2 handle 20: htb rate 250kbps ceil 250kbps && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:3 handle 30: htb rate 250kbps ceil 250kbps && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:4 handle 40: htb rate 250kbps ceil 250kbps && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:5 handle 50: htb rate 250kbps ceil 250kbps && '
    #        )

    # h1.cmd('/sbin/tc qdisc add dev h1-eth0 root handle 1: htb default 11 && '
    #        '/sbin/tc class add dev h1-eth0 parent 1: classid 1:1 htb rate 250kbps ceil 250kbps burst 250kb && '
    #        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:11 htb rate 240kbps ceil 240kbps burst 240kb && '
    #        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:12 htb rate 10kbps ceil 10kbps burst 10kb && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:11 handle 11: prio bands 5 priomap 4 4 4 4 3 3 3 3 2 2 1 1 0 0 0 0 && '
    #        '/sbin/tc qdisc add dev h1-eth0 parent 1:12 handle 12: prio bands 5 priomap 4 4 4 4 3 3 3 3 2 2 1 1 0 0 0 0 && '
    #        '/sbin/tc filter add dev h1-eth0 root 1: protocol ip prio 1 u32 match ip protocol 17 0xff flowid 1:1 && '
    #        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.2 flowid 1:11 && '
    #        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.3 flowid 1:12 && '
    #        '/sbin/tc filter add dev h1-eth0 parent 1:11 protocol ip prio 1 u32 match ip dst 192.168.0.2 flowid 11: && '
    #        '/sbin/tc filter add dev h1-eth0 parent 1:12 protocol ip prio 1 u32 match ip dst 192.168.0.3 flowid 12:'
    #        )
    """
    to shape data at    9.6kbps -> 76800bit -> 76.8kbit
                        4.8kbps -> 38400bit -> 38.4kbit
                        2.4kbps -> 19200bit -> 19.2kbit
                        1.2kbps -> 9600bit  -> 9.6kbit
                        0.6kbps -> 4800bit  -> 4.8kbit
                        240kbps -> 1920000bit -> 1920kbit
                        250kbps -> 2000000bit -> 2000kbit
    """
    h1.cmd(
        '/sbin/tc qdisc add dev h1-eth0 root handle 1: htb default 11 && '
        '/sbin/tc class add dev h1-eth0 parent 1: classid 1:1 htb rate 2000kbit ceil 2000kbit burst 250kb && '
        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:11 htb rate 4.8kbit ceil 4.8kbit burst 10kb && '
        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:12 htb rate 1920kbit ceil 1920kbit burst 240kb && '
        '/sbin/tc qdisc add dev h1-eth0 parent 1:11 handle 11: prio bands 4 priomap 3 3 2 3 0 3 1 3 3 3 3 3 3 3 3 3 && '
        '/sbin/tc qdisc add dev h1-eth0 parent 1:12 handle 12: prio bands 4 priomap 3 3 2 3 0 3 1 3 3 3 3 3 3 3 3 3 && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:1 handle 111: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:2 handle 112: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:3 handle 113: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:4 handle 114: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:1 handle 121: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:2 handle 122: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:3 handle 123: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:4 handle 124: netem limit 1000 delay 5ms && '
        '/sbin/tc filter add dev h1-eth0 parent 1: protocol ip prio 1 u32 matchall flowid 1:1 && '
        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.2 flowid 1:11 && '
        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.3 flowid 1:12 && '
        '/sbin/tc filter add dev h1-eth0 parent 1:11 protocol ip prio 1 u32 match ip dst 192.168.0.2 flowid 11: && '
        '/sbin/tc filter add dev h1-eth0 parent 1:12 protocol ip prio 1 u32 match ip dst 192.168.0.3 flowid 12: && '
        '/sbin/tc filter add dev h1-eth0 parent 11:1 protocol ip prio 1 u32 match ip dsfield 0x1e 0x1e flowid 111: && '
        '/sbin/tc filter add dev h1-eth0 parent 11:2 protocol ip prio 1 u32 match ip dsfield 0x16 0x1e flowid 112: && '  #match ip tos 0x58 0xff match ip protocol 0x11 0xff
        '/sbin/tc filter add dev h1-eth0 parent 11:3 protocol ip prio 1 u32 match ip dsfield 0x0e 0x1e flowid 113: && '
        '/sbin/tc filter add dev h1-eth0 parent 11:4 protocol ip prio 1 u32 match ip dsfield 0x04 0x1e match ip dsfield 0x00 0x1e flowid 114: && '
        '/sbin/tc filter add dev h1-eth0 parent 12:1 protocol ip prio 1 u32 match ip dsfield 0x1e 0x1e flowid 121: && '
        '/sbin/tc filter add dev h1-eth0 parent 12:2 protocol ip prio 1 u32 match ip dsfield 0x16 0x1e flowid 122: && '
        '/sbin/tc filter add dev h1-eth0 parent 12:3 protocol ip prio 1 u32 match ip dsfield 0x0e 0x1e flowid 123: && '
        '/sbin/tc filter add dev h1-eth0 parent 12:4 protocol ip prio 1 u32 match ip dsfield 0x04 0x1e match ip dsfield 0x00 0x1e flowid 124:'
    )

    sleep(3)
    makeTerm(h2,
             title='mgen receiver',
             cmd="mgen input receive.mgn output receive_log.txt")
    sleep(1)
    makeTerm(h1,
             title='class statistics',
             cmd="watch -dc tc -s -d -j class show dev h1-eth0")
    sleep(2)
    makeTerm(h1, title='mgen sender', cmd="mgen input send.mgn")

    # makeTerm(h2, title='mgen receiver', cmd='mgen input receive.mgn output receive_log.txt')
    # makeTerm(h2, title='packet sniffer receiver', cmd="sudo python packet_sniffer_receiver.py")
    # makeTerm(h1, title='packet sniffer sender', cmd="sudo python packet_sniffer_sender.py")
    # makeTerm(h1, title='qdisc statistics', cmd="sh log_qdisc.sh")
    # makeTerm(h1, title='class statistics', cmd="sh log_class.sh")
    # makeTerm(h1, title='mgen sender', cmd="mgen input send.mgn")
    sleep(1)

    CLI(net)
    net.stop()
    os.system('sudo mn -c')
Example #29
# Kill Mininet and/or Ryu
os.system("sudo mn -c 2> /dev/null")
os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null")

print 'Starting Ryu controller'
os.system('ryu-manager ~/ryu/ryu/app/openstate/ddos/ddos.py 2> /dev/null &')

print 'Starting Mininet'
net = Mininet(topo=SingleSwitchTopo(2),switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634,autoStaticArp=True)
net.start()

time.sleep(5)

# Start Server @h2 on port 2000
makeTerm(net['h2'],cmd='python ~/ryu/ryu/app/openstate/echo_server.py 2000')

###############################################################################
print '\nTest 1: h1 connects to h2 without any ongoing attack'
time.sleep(2)
net['h1'].cmd('(echo "HI!" | nc -q3 -T af11 10.0.0.2 2000) &')
net['h1'].cmd('(echo "HI!" | nc -q3 -T af11 10.0.0.2 2000) &')

out = ''
attempts = 0
while 'ESTABLISHED' not in out and attempts<5:
	out = net['h2'].cmd('(netstat -an | grep tcp | grep 10.0.0.2:2000)')
	print 'Waiting %d seconds...' % (5-attempts)
	attempts += 1
	time.sleep(1)
Example #30
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.cli import CLI
from mininet.term import makeTerm


if '__main__' == __name__:
	net =Mininet(controller=RemoteController)
	c0 = net.addController('c0',ip='192.168.99.101',port=6633)
	s1 = net.addSwitch('s1')
	h1 = net.addHost('h1')
	h2 = net.addHost('h2', mac='00:00:00:00:00:22')
	h3 = net.addHost('h3', mac='00:00:00:00:00:23')
	h4 = net.addHost('h4', mac='00:00:00:00:00:24')

	net.addLink(s1, h1)
	net.addLink(s1, h1)
	net.addLink(s1, h2)
	net.addLink(s1, h3)
	net.addLink(s1, h4)

	net.build()
	c0.start()
	s1.start([c0])

	# makeTerm returns a list of terminal processes, so extend net.terms rather than appending the list itself
	net.terms += makeTerm(s1)
	net.terms += makeTerm(h1)
	net.terms += makeTerm(h2)
	net.terms += makeTerm(h3)
	net.terms += makeTerm(h4)
	CLI(net)
	net.stop()
Example #31
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    h3 = net.addHost('h3')

    Link(s1, h1)
    Link(s2, h2)
    Link(s3, h3)

    Link(s1, s2)
    Link(s2, s3)
    Link(s3, s1)
    

    net.build()
    c0.start()
    s1.start([c0])
    s2.start([c0])
    s3.start([c0])

    # makeTerm returns a list of terminal processes, so extend net.terms rather than appending the list itself
    net.terms += makeTerm(c0)
    net.terms += makeTerm(s1)
    net.terms += makeTerm(s2)
    net.terms += makeTerm(s3)
    net.terms += makeTerm(h1)
    net.terms += makeTerm(h2)
    net.terms += makeTerm(h3)

    CLI(net)

    net.stop()
Example #32
def emptyNet():
    "Create an empty network and add nodes to it."

    MININET_VERSION = re.sub(r'[^\d\.]', '', VERSION)

    net = Mininet(controller=Controller)

    info('*** Adding controller\n')
    net.addController(name='c0',
                      controller=RemoteController,
                      ip='127.0.0.1',
                      protocol='tcp',
                      port=6633)

    #This is the 3 host, 1 switch topology.
    #The switch ID is of:0000000000000001.
    #The port for Snort is 4.
    #You must make sure this is set in SnortManager.java in the Snort app to get it to work correctly.

    info('*** Adding hosts\n')
    h1 = net.addHost('h1', ip='10.0.0.1')
    h2 = net.addHost('h2', ip='10.0.0.2')
    h3 = net.addHost('h3', ip='10.0.0.3')
    snort1 = net.addHost('snort1', ip='10.0.0.4')

    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')

    info('*** Creating links\n')
    net.addLink(s1, h1)
    net.addLink(s1, h2)
    net.addLink(s1, h3)
    net.addLink(snort1, s1)

    _intf = Intf('veth1', node=snort1)
    ''' 
    #This is the 9 host, 4 switch topology.
    #The Switch IDs go from of:0000000000000001 to of:0000000000000004.
    #The port for Snort is 4 for of:0000000000000001 and 5 for the rest.
    #You must make sure this is set in SnortManager.java in the Snort app to get it to work correctly.

    info( '*** Adding hosts\n' )
    h1 = net.addHost('h1', ip='10.0.0.1')
    h2 = net.addHost('h2', ip='10.0.0.2')
    h3 = net.addHost('h3', ip='10.0.0.3')
    h4 = net.addHost('h4', ip='10.0.0.4')
    h5 = net.addHost('h5', ip='10.0.0.5')
    h6 = net.addHost('h6', ip='10.0.0.6')
    h7 = net.addHost('h7', ip='10.0.0.7')
    h8 = net.addHost('h8', ip='10.0.0.8')
    h9 = net.addHost('h9', ip='10.0.0.9')
    snort1 = net.addHost('snort1', ip='10.0.0.10')


    info( '*** Adding switches\n' )
    s1 = net.addSwitch( 's1' )
    s2 = net.addSwitch( 's2' )
    s3 = net.addSwitch( 's3' )
    s4 = net.addSwitch( 's4' )

    info( '*** Creating links\n' )
    net.addLink(s1, s2)
    net.addLink(s1, s3)
    net.addLink(s1, s4)

    net.addLink(s2, h1)
    net.addLink(s2, h2)
    net.addLink(s2, h3)
    net.addLink(s3, h4)
    net.addLink(s3, h5)
    net.addLink(s3, h6)
    net.addLink(s4, h7)
    net.addLink(s4, h8)
    net.addLink(s4, h9)
    
    net.addLink(s1, snort1)
    net.addLink(s2, snort1)
    net.addLink(s3, snort1)
    net.addLink(s4, snort1)
    '''

    info('*** Starting network\n')
    net.start()

    #This will find interfaces on the snort machine (excluding veth1) and bridge them into one interface.
    interfaces = net.get("snort1").intfNames()[:-1]
    snort1.cmd("brctl addbr snort_bridge")
    for intfs in interfaces:
        info('*** Adding interface ' + intfs + ' to snort_bridge' + '\n')
        snort1.cmd("brctl addif snort_bridge " + intfs)
    snort1.cmd("ifconfig snort_bridge up")

    #Initial terminal to set up an X11 tunnel for remote machines
    #TODO: add command to exit this terminal for local machines
    net.terms += makeTerm(snort1, 'tmp')
    time.sleep(2)

    net.terms += makeTerm(snort1,
                          'Pig Relay',
                          cmd="bash startPigrelay.sh '" + sys.argv[1] + "'")
    info('*** Running CLI\n')
    CLI(net)

    h1.deleteIntfs()
    h2.deleteIntfs()
    h3.deleteIntfs()
    snort1.deleteIntfs()

    info('*** Stopping network\n')
    net.stop()
Example #33

os.system("xterm -e 'ryu-manager ~/ryu/ryu/app/openstate/forwarding_consistency_1_to_many.py'&")



######Starting mininet

mytopo=SingleSwitchTopo(4)
time.sleep(1)
print("\n********************************** HELP *********************************************")
print("\nType \"python ~/ryu/ryu/app/openstate/echo_server.py 200\" in h2's xterm")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 300\" in h3's xterm")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 400\" in h4's xterm")
print("Type \"nc 10.0.0.2 80\" in all h1's xterms\n")
print("In order to test new path selection, close and reopen netcat")
print("\nTo exit type \"ctrl+D\" or exit")
print("*************************************************************************************")
net = Mininet(topo=mytopo,switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634,autoStaticArp=True)
net.start()
h1,h2,h3,h4  = net.hosts[0], net.hosts[1], net.hosts[2], net.hosts[3]
for i in range(3):
    makeTerm(h1)
makeTerm(h2)
makeTerm(h3)
makeTerm(h4)
CLI(net)
net.stop()
os.system("sudo mn -c")
os.system("kill -9 $(pidof -x ryu-manager)")
Example #34
from mininet.net import Mininet
from mininet.node import UserSwitch, Controller, RemoteController
from mininet.cli import CLI
from mininet.term import makeTerm

import os

net = Mininet(switch = UserSwitch)

h1 = net.addHost('h1', ip='10.0.0.1', mac="00:00:00:00:00:01")
h2 = net.addHost('h2', ip='10.0.0.2', mac="00:00:00:00:00:02")
client = net.addHost('client', ip='10.0.0.3', mac="00:00:00:00:00:03")

s1 = net.addSwitch('s1')

net.addLink(s1, h1)
net.addLink(s1, h2)
net.addLink(s1, client)

c0 = net.addController('c0', controller=RemoteController)

net.start()
makeTerm(h1)
makeTerm(h2)
#h1.cmdPrint('python echo_server.py 80 > h1.txt')
makeTerm(client)

CLI(net)

net.stop()
os.system("sudo mn -c")
Example #35
def generate_flow_entries_dict(GUI=False):

    global requests, faults
    global G, pos, hosts, switches, mapping
    global mn_topo
    global net
    global mn_topo_ports

    if (os.path.isfile('./tmp/last_results_hash')):
        f=open('./tmp/last_results_hash','r')
        if (str(hh)!=f.read()):
            print('Erasing figs folder...')
            f.close()
            files = glob.glob('./figs/*')
            for f in files:
                os.remove(f)
            f=open('./tmp/last_results_hash','w+')
            f.write(str(hh))
            f.close()
    else:
        f=open('./tmp/last_results_hash','w+')
        f.write(str(hh))
        f.close()
        print('Erasing figs folder...')
        files = glob.glob('./figs/*')
        for f in files:
            os.remove(f)

    if (os.path.isfile('./tmp/' + hh + '-requests.p') and os.path.isfile('./tmp/' + hh + '-faults.p')):
        print 'Loading cached requests, faults...'
        requests = pickle.load(open('./tmp/' + hh + '-requests.p'))
        faults = pickle.load(open('./tmp/' + hh + '-faults.p'))
    else:
        print 'Parsing ampl results (it may take a while)...'
        requests, faults = parse_ampl_results()

    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'

    print "Building network graph from network.xml..."
    G, pos, hosts, switches, mapping = parse_network_xml()
    print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts'

    print "NetworkX to Mininet topology conversion..."
    mn_topo = networkx_to_mininet(G, hosts, switches, mapping)

    '''
    Mininet API 2.1.0ps
    mn_topo.ports = {'s3': {'s2': 1, 's4': 2}, 's2': {'s3': 1, 's1': 2, 's5': 3}, ...}

    Mininet API 2.2.0
    mn_topo.ports = {'s3': {1: ('s2', 1), 2: ('s4', 1)}, 's2': {1: ('s3', 1), 2: ('s1', 1), 3: ('s5', 1)}, ...}

    Our parser is based on old API. mn_topo_ports is an adapted version of mn_topo.ports according to the old API
    '''
    for k in mn_topo.ports:
        mn_topo_ports[k]={}
        for k2 in mn_topo.ports[k]:
            mn_topo_ports[k][ mn_topo.ports[k][k2][0] ] = k2
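    # illustrative shapes (hypothetical values), following the API note above:
    #   mn_topo.ports (2.2.0 API):   {'s2': {1: ('s3', 1), 2: ('s1', 1)}}
    #   mn_topo_ports (old API):     {'s2': {'s3': 1, 's1': 2}}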
    
    print "Cleaning previous Mininet instances..."
    os.system('sudo mn -c 2> /dev/null')
    net = Mininet(topo=mn_topo, link=TCLink, controller=RemoteController, switch=UserSwitch, cleanup=True,autoSetMacs=False,listenPort=6634)
    print "Starting Mininet topology..."
    net.start()

    # Setup of MAC and IP 
    for i in range(len(net.hosts)):
        host_name = str(net.hosts[i])
        host_number = host_name[1:]
        mac_str = int_to_mac_str(int(host_number))
        ip_str = int_to_ip_str(int(host_number))
        net.hosts[i].setMAC(mac_str,'h'+host_number+'-eth0')
        net.hosts[i].setIP(ip_str,8,'h'+host_number+'-eth0')
        #makeTerm(net.hosts[i])

    if not GUI:
        s = raw_input("\n\x1B[32mInsert host numbers (separated by spaces) to open xterm: \x1B[0m")
        host_indexes = map(int, s.split())
        if len(host_indexes)>0:
            for i in host_indexes:
                if 'h'+str(i) in net:
                    makeTerm(net['h'+str(i)])
    
    # Setup of Static ARP Entries
    for src in net.hosts:
        for dst in net.hosts:
            if src != dst:
                src.setARP(ip=dst.IP(), mac=dst.MAC())

    # Flow entries creation

    # flow_entries_dict is a dict() that associates nodes with their flow entries
    global flow_stats_dict
    global flow_entries_dict
    global group_entries_dict
    global group_ID
    # fault_ID is a dict() that associates faults with an ID
    fault_ID = dict()

    # Associate req (A,B) and fault (X,Y) with a progressive number, starting from 1.
    # group_IDs are used as group entries index
    i=1
    for r in requests:
        for f in requests[r]['faults']:
            group_ID[(r,f)]=i
            group_ID[(r,(f[1],f[0]))]=i
            i+=1

    # Associate fault (X,Y) with a progressive number, starting from 1. fault_IDs are used for MPLS tags and flow states.
    # Actually tag and state values will be shifted by 16 because MPLS label values 0-15 are reserved.
    for i in range(1,len(faults.keys())+1):
        fault_ID = dict(fault_ID.items() + [(faults.keys()[i-1], i)])

    for i in range(len(requests)):
        request = requests.keys()[i]
        print "Processing REQUEST %d/%d: %s" %(i+1,len(requests),request)
        #detect nodes for this request
        detect_nodes=Set([])
        redirect_nodes=Set([])
        for y in range(len(requests[request]['faults'])):
            detect_nodes.add(requests[request]['faults'].items()[y][1]['detect_node'])
        for y in range(len(requests[request]['faults'])):
            redirect_nodes.add(requests[request]['faults'].items()[y][1]['redirect_node'])

        # [1] Primary Path rules
        primary_path = requests[request]['primary_path']
        for x in range(len(primary_path)):
            #print "Installing Primary Path rules in node", primary_path[x]
            # match(SRC_MAC, DEST_MAC, in_port, state=0, flags=PRIMARY_LINK_UP) -> action(output(next_primary_hop))

            if primary_path[x] not in detect_nodes and primary_path[x] not in redirect_nodes :
                #[NORMAL NODE]
                if x == 0: # first node in the primary path
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['h'+str(primary_path[x])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=0)
                    flow_entry['actions']=[ofparser.OFPActionPushMpls()]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions']),ofparser.OFPInstructionGotoTable(1),ofparser.OFPInstructionWriteMetadata(16,0xffffffffffffffff)]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')

                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['h'+str(primary_path[x])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        eth_type=0x8847,metadata=16)
                    flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=16),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=1
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)

                elif x == len(primary_path)-1: # last node in the primary path
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x-1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=0,eth_type=0x8847)
                    flow_entry['actions']=[ofparser.OFPActionPopMpls(),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['h'+str(primary_path[x])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')

                else: # intermediate node in the primary path
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x-1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=0,eth_type=0x8847)
                    flow_entry['actions']=[ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')

            elif primary_path[x] in detect_nodes and primary_path[x] in redirect_nodes:
                #[DETECT AND REDIRECT]
                if x == 0: # first node in the primary path
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['h'+str(primary_path[x])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=0)
                    flow_entry['actions']=[ofparser.OFPActionPushMpls(),ofparser.OFPActionGroup(group_ID[(request,(primary_path[x],primary_path[x+1]))])]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')

                    #bucket creation (go to the next primary node)
                    max_len = 2000
                    actions = [ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0),
                                ofparser.OFPActionSetField(mpls_label=16)]
                    weight = 0
                    watch_port = mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])]
                    watch_group = ofproto.OFPG_ANY
                    bucket = ofparser.OFPBucket(weight, watch_port, watch_group,actions)
                    group_entries_dict = add_group_entry(group_entries_dict,primary_path[x],group_ID[(request,(primary_path[x],primary_path[x+1]))],bucket)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'group')
                else:
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x-1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=0,eth_type=0x8847)
                    flow_entry['actions']=[ofparser.OFPActionGroup(group_ID[(request,(primary_path[x],primary_path[x+1]))])]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')

                    #bucket creation (go to the next primary node)
                    max_len = 2000
                    actions = [ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0)]
                    weight = 0
                    watch_port = mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])]
                    watch_group = ofproto.OFPG_ANY
                    bucket = ofparser.OFPBucket(weight, watch_port, watch_group,actions)
                    group_entries_dict = add_group_entry(group_entries_dict,primary_path[x],group_ID[(request,(primary_path[x],primary_path[x+1]))],bucket)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'group')
            
            elif primary_path[x] in detect_nodes and primary_path[x] not in redirect_nodes:
                #[DETECT ONLY]
                '''
                If an edge node is a detect node, it must also be a redirect node,
                so this branch never handles the first node of the primary path.
                '''
                #bucket creation (go to the next primary node)
                max_len = 2000
                actions = [ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0)]
                weight = 0
                watch_port = mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])]
                watch_group = ofproto.OFPG_ANY
                bucket = ofparser.OFPBucket(weight, watch_port, watch_group,actions)
                group_entries_dict = add_group_entry(group_entries_dict,primary_path[x],group_ID[(request,(primary_path[x],primary_path[x+1]))],bucket)
                flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'group')

                flow_entry = dict()
                flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x-1])],
                    eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),eth_type=0x8847, state=0)
                flow_entry['actions']=[ofparser.OFPActionGroup(group_ID[(request,(primary_path[x],primary_path[x+1]))])]
                flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                flow_entry['table_id']=0
                flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')
                '''
                In our model every node of a request is a detect node (except the last one),
                so a node can never be redirect-only. The only way would be to leave the fault
                of some link unhandled: in that case a node could act as a pure redirect node.
                '''
            elif primary_path[x] not in detect_nodes and primary_path[x] in redirect_nodes:
                #REDIRECT ONLY
                flow_entry = dict()
                flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x-1])],
                    eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),eth_type=0x8847, state=0)
                flow_entry['actions']=[ofparser.OFPActionOutput(mn_topo_ports['s'+str(primary_path[x])]['s'+str(primary_path[x+1])],0)]
                flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                flow_entry['table_id']=0
                flow_entries_dict = add_flow_entry(flow_entries_dict,primary_path[x],flow_entry)
                flow_stats_dict = update_flow_stats(flow_stats_dict,primary_path[x],'primary')
        
        # for each fault of the current request
        for y in range(len(requests[request]['faults'].items())):
            
            fault = requests[request]['faults'].items()[y]
            #print "FAULT:", fault[0]
            tag = fault_ID[fault[0]]+16
            # MPLS labels 0 to 15 are reserved, so faults are numbered starting from 17 (tag=16 means NO FAULT).
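            # Illustration (follows from tag = fault_ID + 16, assuming fault IDs start at 1):
            #   fault_ID == 1 -> mpls_label 17, fault_ID == 2 -> mpls_label 18, ...
            #   while label 16 stays reserved for fault-free traffic on the primary path.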
            
            # [2] Detour Path rules
            # match(SRC_MAC, DST_MAC, in_port, TAG=ID_BROKEN_LINK) -> action(OUTPUT(NEXT_DETOUR_HOP))
            detour = requests[request]['faults'].items()[y][1]['detour_path']
            for z in range(1,len(detour)-1):
                #print "Installing Detour Node rules in node", detour[z]

                flow_entry = dict()
                flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(detour[z])]['s'+str(detour[z-1])],
                    eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                    eth_type=0x8847, mpls_label=tag)
                flow_entry['actions']=[ofparser.OFPActionOutput(mn_topo_ports['s'+str(detour[z])]['s'+str(detour[z+1])],0)]
                flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                flow_entry['table_id']=0
                flow_entries_dict = add_flow_entry(flow_entries_dict,detour[z],flow_entry)
                flow_stats_dict = update_flow_stats(flow_stats_dict,detour[z],'detour')

            #print "Installing Last Detour Node rules in node", detour[len(detour)-1]
            # match(SRC_MAC, DST_MAC, in_port, TAG=ID_BROKEN_LINK) -> action(OUTPUT(NEXT_PRIMARY_HOP), UNTAG)    

            # last detour node position in the primary path   
            l_d_n_index_in_p_p = primary_path.index(detour[len(detour)-1])
            
            flow_entry = dict()
            flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(detour[len(detour)-1])]['s'+str(detour[len(detour)-2])],
                    eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                    eth_type=0x8847, mpls_label=tag)
            if l_d_n_index_in_p_p == len(primary_path)-1: # l.d.n. is an edge switch
                flow_entry['actions']=[ofparser.OFPActionPopMpls(),
                    ofparser.OFPActionOutput(mn_topo_ports['s'+str(detour[len(detour)-1])]['h'+str(detour[len(detour)-1])],0)]
            else:
                flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=16),
                    ofparser.OFPActionOutput(mn_topo_ports['s'+str(detour[len(detour)-1])]['s'+str(primary_path[l_d_n_index_in_p_p + 1])],0)]
            flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
            flow_entry['table_id']=0
            flow_entries_dict = add_flow_entry(flow_entries_dict,detour[len(detour)-1],flow_entry)
            flow_stats_dict = update_flow_stats(flow_stats_dict,detour[len(detour)-1],'detour')

            # [3] Forward Back Path rules
            #match(SRC_MAC, DST_MAC, in_port, TAG=ID_BROKEN_LINK) -> action(OUTPUT(NEXT_FW_BACK_HOP))

            fw_back_path = requests[request]['faults'].items()[y][1]['fw_back_path']
            if fw_back_path is not None:
                for z in range(1,len(fw_back_path)-1):
                    flow_entry = dict()
                    #print "Installing Forward back path rules in node", fw_back_path[z]
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(fw_back_path[z])]['s'+str(fw_back_path[z + 1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        eth_type=0x8847, mpls_label=tag)
                    flow_entry['actions']=[ofparser.OFPActionOutput(mn_topo_ports['s'+str(fw_back_path[z])]['s'+str(fw_back_path[z - 1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,fw_back_path[z],flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,fw_back_path[z],'fw_back')

            # [4] Redirect node, Detect node and Detect&Redirect node rules
            redirect_node = requests[request]['faults'].items()[y][1]['redirect_node']
            detect_node = requests[request]['faults'].items()[y][1]['detect_node']

            # [4.1] Detect&Redirect node rules
            if redirect_node == detect_node:
                # match(SRC_MAC, DST_MAC, in_port, FLAG=LINK_DOWN) -> action(OUTPUT(DETOUR_PATH), TAG=ID_BROKEN_LINK)
                #print "Installing Detect & Redirect node rules in node", redirect_node
                # node position in the primary path   
                node_index_in_p_p = primary_path.index(redirect_node)

                if node_index_in_p_p == 0: # Detect&Redirect node is an edge switch
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['h'+str(redirect_node)],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        state=tag)
                    flow_entry['actions']=[ofparser.OFPActionPushMpls()]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions']),
                        ofparser.OFPInstructionGotoTable(1),
                        ofparser.OFPInstructionWriteMetadata(tag,0xffffffffffffffff)]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,redirect_node,'detect&red')
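                    # Hedged note: the MPLS header is pushed in table 0 while the label is set in
                    # table 1; the fault id is handed over via packet metadata, presumably because
                    # setting the label of a just-pushed header in the same action list is not
                    # reliable on the softswitch used here.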

                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['h'+str(redirect_node)],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        eth_type=0x8847,metadata=tag)
                    flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=tag),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=1
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)                

                else:
                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['s'+str(primary_path[node_index_in_p_p - 1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),eth_type=0x8847,
                        state=tag)
                    flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=tag),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,redirect_node,'detect&red')

                #bucket creation (set state and redirect on detour path)
                max_len = 2000
                actions = [osparser.OFPExpActionSetState(state=tag, table_id=0),ofparser.OFPActionSetField(mpls_label=tag),
                    ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                weight = 0
                watch_port = mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])]
                watch_group = ofproto.OFPG_ANY
                bucket = ofparser.OFPBucket(weight, watch_port, watch_group,actions)
                group_entries_dict = add_group_entry(group_entries_dict,primary_path[node_index_in_p_p],group_ID[(request,(primary_path[node_index_in_p_p],primary_path[node_index_in_p_p+1]))],bucket)
                
            else:
                # [4.2] Redirect only node rules
                #print "Installing Redirect only node rules in node", redirect_node
                node_index_in_p_p = primary_path.index(redirect_node)

                #match(SRC_MAC, DST_MAC, in_port, TAG=ID_BROKEN_LINK) -> action(SET_STATE(FAULT_x), OUTPUT(DETOUR_PATH))
                flow_entry = dict()
                flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['s'+str(primary_path[node_index_in_p_p+1])],
                        eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                        eth_type=0x8847, mpls_label=tag)
                flow_entry['actions']=[osparser.OFPExpActionSetState(state=tag, table_id=0),
                    ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                flow_entry['table_id']=0
                flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                flow_stats_dict = update_flow_stats(flow_stats_dict,redirect_node,'redirect_only')
                
                #match(SRC_MAC, DST_MAC, in_port, STATE=FAULT_X) -> action(output(DETOUR_PATH), TAG=ID_BROKEN_LINK)
                flow_entry = dict()
                if node_index_in_p_p == 0: # Redirect only node is an edge switch
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['h'+str(redirect_node)],
                            eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                            state=tag)
                    flow_entry['actions']=[ofparser.OFPActionPushMpls()]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions']),
                        ofparser.OFPInstructionGotoTable(1),
                        ofparser.OFPInstructionWriteMetadata(tag,0xffffffffffffffff)]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,redirect_node,'redirect_only')

                    flow_entry = dict()
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['h'+str(redirect_node)],
                            eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                            metadata=tag,eth_type=0x8847)
                    flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=tag),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=1
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                else:
                    flow_entry['match']=ofparser.OFPMatch(in_port=mn_topo_ports['s'+str(redirect_node)]['s'+str(primary_path[node_index_in_p_p-1])],
                            eth_src=int_to_mac_str(request[0]),eth_dst=int_to_mac_str(request[1]),
                            eth_type=0x8847,state=tag)
                    flow_entry['actions']=[ofparser.OFPActionSetField(mpls_label=tag),
                        ofparser.OFPActionOutput(mn_topo_ports['s'+str(redirect_node)]['s'+str(detour[1])],0)]
                    flow_entry['inst']=[ofparser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, flow_entry['actions'])]
                    flow_entry['table_id']=0
                    flow_entries_dict = add_flow_entry(flow_entries_dict,redirect_node,flow_entry)
                    flow_stats_dict = update_flow_stats(flow_stats_dict,redirect_node,'redirect_only')

                # [4.3] Detect only node rules
                #match(SRC_MAC, DST_MAC, in_port, FLAG=LINK_DOWN) -> action(TAG(ID_BROKEN_LINK), output(FWD_BACK_PATH))
                #print "Installing Detect only node rules in node", detect_node
                node_index_in_p_p = primary_path.index(detect_node)

                #bucket creation (set mpls tag and forward back)
                max_len = 2000
                actions=[ofparser.OFPActionSetField(mpls_label=tag),
                    ofparser.OFPActionOutput(ofproto.OFPP_IN_PORT,0)]
                weight = 0
                watch_port = mn_topo_ports['s'+str(primary_path[node_index_in_p_p])]['s'+str(primary_path[node_index_in_p_p-1])]
                watch_group = ofproto.OFPG_ANY
                bucket = ofparser.OFPBucket(weight, watch_port, watch_group,actions)
                group_entries_dict = add_group_entry(group_entries_dict,primary_path[node_index_in_p_p],group_ID[(request,(primary_path[node_index_in_p_p],primary_path[node_index_in_p_p+1]))],bucket)
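                # Hedged note: OFPP_IN_PORT sends the tagged packet back out the port it came in
                # from, i.e. upstream along the primary path; the forward-back rules in [3] then
                # carry it to the redirect node, which deviates it onto the detour path.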

    time.sleep(3)

    # We have to remove the hosts from the network to draw it, since we have created hosts on demand and we don't have positions
    e = range(len(pos.items())+1,len(G.nodes())+2)
    G.remove_nodes_from(e)
    nx.draw(G, pos, node_size=300, font_size=10, node_color='w', ax=None, with_labels=True)

    if not os.path.exists('./figs'):
        os.makedirs('./figs')

    if (os.path.isfile('./figs/network.png')):
        os.remove('./figs/network.png')
    plt.savefig("./figs/network.png", format="PNG")
Example #36
0
def openXterm(hostname):
    global net
    makeTerm(net[hostname])
os.system("xterm -e 'ryu-manager ~/ryu/ryu/app/openstate/playground/forwarding_consistency_many_to_many_ctrl.py'&")



######Starting mininet
topos = { 'mytopo': ( lambda: MyTopo() ) }
mytopo=MyTopo()
time.sleep(1)
print("\n********************************** HELP *********************************************")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 200\" in h2's xterm")
print("Type \"nc 10.0.0.2 200\" in h1's xterm")
print("Watching the tcpdump results, it is possible to see that forwarding consistency is guaranteed\n"
      "In order to test new path selection, close and reopen netcat")
print("\nTo exit type \"ctrl+D\" or exit")
print("*************************************************************************************")
net = Mininet(topo=mytopo,switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,autoStaticArp=True,listenPort=6634)
net.start()
os.system("xterm -e 'tcpdump -i s4-eth1'&")
os.system("xterm -e 'tcpdump -i s4-eth2'&")
os.system("xterm -e 'tcpdump -i s4-eth3'&")
os.system("xterm -e 'tcpdump -i s4-eth4'&")
os.system("xterm -e 'tcpdump -i s4-eth5'&")
os.system("xterm -e 'tcpdump -i s4-eth6'&")
h1,h2  = net.hosts[0], net.hosts[1]
makeTerm(h1)
makeTerm(h2)
CLI(net)
net.stop()
os.system("sudo mn -c")
os.system("kill -9 $(pidof -x ryu-manager)")
# Kill Mininet and/or Ryu
os.system("sudo mn -c 2> /dev/null")
os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null")

print 'Starting Ryu controller'
os.system('ryu-manager ~/ryu/ryu/app/openstate/forwarding_consistency_1_to_many.py 2> /dev/null &')

print 'Starting Mininet'
net = Mininet(topo=SingleSwitchTopo(4),switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634)
net.start()

time.sleep(5)
print 'Starting Echo Servers on h2, h3 and h4'

for h in [2,3,4]:
	makeTerm(net['h%d' % h],cmd='python ~/ryu/ryu/app/openstate/echo_server.py %d00' %h)

time.sleep(5)

CONN_NUM = 20
print 'Starting %d TCP connections from h1' %CONN_NUM
for n in range(CONN_NUM):
	net['h1'].cmd('(echo "HI!" | nc -q -1 10.0.0.2 80) &')

time.sleep(5)

established = {}
syn_recv = {}
for h in [2,3,4]:
	out = net['h%d' % h].cmd('(netstat -an | grep tcp | grep 10.0.0.%d:%d00)' % (h,h))
	established[h]=out.count("ESTABLISHED")
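
# (hedged completion of the truncated listing: the original presumably also counted half-open
#  connections, e.g. syn_recv[h] = out.count("SYN_RECV"), and reported both tallies per host)
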
def topology(scenario: int, signal_window: int, scan_interval: float, disconnect_threshold: float,
             reconnect_threshold: float, scan_iface: bool = False, no_olsr: bool = False,
             qdisc_rates: dict = {'disconnect': 0, 'reconnect': 0}):
    """
    Build a custom topology and start it.

    Note: If you do not want to use a remote SDN controller but the Controller class that is included in Mininet-Wifi, you will have to change some of the lines below (see the commented-out alternatives).
    """
    net = Mininet_wifi(topo=None, build=False, link=wmediumd, wmediumd_mode=interference, noise_th=-91, fading_cof=3,
                       autoAssociation=False, allAutoAssociation=False)

    info('*** Adding controller\n')
    # Use this if you have a remote controller (e.g. the RYU controller) installed and running in the background
    c0 = net.addController(name='c0', controller=RemoteController, ip='127.0.0.1', port=6633)

    # Use this instead if you want to use the SDN controller provided by Mininet-Wifi
    # c0 = net.addController(name='c0', controller=Controller)

    info('*** Adding switches/APs\n')
    # Use this SDN switch configuration if you use the RYU controller as a remote controller
    ap1 = net.addAccessPoint('ap1', ip='10.0.0.10', mac='00:00:00:00:01:00', listenPort=6634, dpid='0000000000000010',
                             ssid='ap1-ssid', mode='g', channel='1', position='30,50,0')

    # Use this if you are using the SDN controller provided by Mininet-Wifi
    # ap1 = net.addAccessPoint('ap1', ip='10.0.0.10', mac='00:00:00:00:01:00', ssid='ap1-ssid', mode='g', channel='1', position='30,50,0')

    info("*** Creating nodes\n")
    if scan_iface:
        scanif = 1
        sta1 = net.addStation('sta1', wlans=2, ip='10.0.0.1', position='30,10,0')
        sta2 = net.addStation('sta2', wlans=2, ip='10.0.0.2', position='10,40,0')
        sta3 = net.addStation('sta3', wlans=2, ip='10.0.0.3', position='50,40,0')
    else:
        scanif = 0
        sta1 = net.addStation('sta1', mac='00:00:00:00:00:01', ip='10.0.0.1', position='30,10,0')
        sta2 = net.addStation('sta2', mac='00:00:00:00:00:02', ip='10.0.0.2', position='10,40,0')
        sta3 = net.addStation('sta3', mac='00:00:00:00:00:03', ip='10.0.0.3', position='50,40,0')

    info("*** Configuring propagation model\n")
    net.setPropagationModel(model="logDistance", exp=4.4)

    info("*** Configuring wifi nodes\n")
    net.configureWifiNodes()

    if scenario > 1:
        info("*** Configuring moblity\n")
        if scenario == 2:
            trace_file = 'Scenario_2.csv'
            smooth_motion = False
            path = os.path.dirname(os.path.abspath(__file__)) + '/data/'
            get_trace([sta1, sta2, sta3], path + trace_file, smooth_motion)
            net.isReplaying = True
        if scenario == 3:
            trace_file = 'Scenario_3.csv'
            smooth_motion = False
            path = os.path.dirname(os.path.abspath(__file__)) + '/data/'
            get_trace([sta1, sta2, sta3], path + trace_file, smooth_motion)
            net.isReplaying = True
        if scenario == 4:
            trace_file = 'Scenario_1.csv'
            smooth_motion = False
            path = os.path.dirname(os.path.abspath(__file__)) + '/data/'
            get_trace([sta1, sta2, sta3], path + trace_file, smooth_motion)
            net.isReplaying = True

    info("*** Creating plot\n")
    net.plotGraph(max_x=100, max_y=100)

    info("*** Starting network\n")
    net.build()
    c0.start()
    net.get('ap1').start([c0])
    sleep(1)
    if scenario > 1:
        info("\n*** Replaying Mobility\n")
        ReplayingMobility(net)
    start_time = datetime.now()
    info("*** Starting flexible SDN script (time: {})\n".format(start_time.timestamp()))
    path = os.path.dirname(os.path.abspath(__file__))
    stat_dir = start_time.strftime('%Y-%m-%d_%H-%M-%S') + "/"
    statistics_dir = path + '/data/statistics/' + stat_dir
    if not os.path.isdir(statistics_dir):
        os.makedirs(statistics_dir)
    cmd = "python3"
    cmd += " {}/flexible_sdn.py".format(path)
    cmd += " -i sta1-wlan0"
    cmd += " -s {}".format(scan_interval)
    cmd += " -d {}".format(disconnect_threshold)
    cmd += " -r {}".format(reconnect_threshold)
    cmd += " -o {}".format(stat_dir)
    cmd += " -w {}".format(signal_window)
    cmd += " -t {}".format(start_time.timestamp())
    if scan_iface:
        cmd += " -S sta1-wlan1"
    if no_olsr:
        cmd += " -O"
    if qdisc_rates['disconnect'] > 0 and qdisc_rates['reconnect'] > 0:
        cmd += " -qr {} -qd {}".format(qdisc_rates['reconnect'], qdisc_rates['disconnect'])
    makeTerm(sta1, title='Station 1', cmd=cmd + " ; sleep 10")
    cmd = "python3"
    cmd += " {}/flexible_sdn.py".format(path)
    cmd += " -i sta3-wlan0"
    cmd += " -s {}".format(scan_interval)
    cmd += " -d {}".format(disconnect_threshold)
    cmd += " -r {}".format(reconnect_threshold)
    cmd += " -o {}".format(stat_dir)
    cmd += " -w {}".format(signal_window)
    cmd += " -t {}".format(start_time.timestamp())
    if scan_iface:
        cmd += " -S sta3-wlan1"
    if no_olsr:
        cmd += " -O"
    if qdisc_rates['disconnect'] > 0 and qdisc_rates['reconnect'] > 0:
        cmd += " -qr {} -qd {}".format(qdisc_rates['reconnect'], qdisc_rates['disconnect'])
    makeTerm(sta3, title='Station 3', cmd=cmd + " ; sleep 10")
    # cmd = "python3 {}/packet_sniffer.py -i sta1-wlan0 -o {}send_packets.csv -f 'icmp[icmptype] = icmp-echo'".format(path, stat_dir)
    # cmd = "python3 {}/packet_sniffer.py -i sta1-wlan0 -o {}send_packets.csv -f '-p udp -m udp --dport 8999' -T True".format(path, stat_dir)
    # makeTerm(sta1, title='Packet Sniffer sta1', cmd=cmd + " ; sleep 10")
    # cmd = "python3 {}/packet_sniffer.py -i sta3-wlan0 -o {}recv_packets.csv -f 'icmp[icmptype] = icmp-echo'".format(path, stat_dir)
    # cmd = "python3 {}/packet_sniffer.py -i sta3-wlan0 -o {}recv_packets.csv -f 'udp dst port 8999'".format(path, stat_dir)
    # makeTerm(sta3, title='Packet Sniffer sta3', cmd=cmd + " ; sleep 10")
    sleep(2)
    # info("*** Starting ping: sta1 (10.0.0.1) -> sta3 (10.0.0.3)\n")
    # makeTerm(sta1, title='ping', cmd="ping 10.0.0.3")
    info("*** Start sending generated packets: sta1 (10.0.0.1) -> sta3 (10.0.0.3)\n")
    makeTerm(sta3, title='Recv', cmd="ITGRecv -a 10.0.0.3 -i sta3-wlan0 -l {}/receiver.log".format(statistics_dir))
    makeTerm(sta1, title='Send', cmd="ITGSend -T UDP -C 10 -a 10.0.0.3 -c 1264 -s 0.123456 -t 170000 -l {}/sender.log ; sleep 10".format(statistics_dir))
    info("\n*** Running CLI\n")
    CLI(net)
    net.stop()
    os.system('sudo pkill xterm')
    out, err = subprocess.Popen(['pgrep', 'olsrd'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    if out:
        subprocess.Popen(['killall', 'olsrd'])
    subprocess.Popen(["python3", "{}/eval_ditg.py".format(path), "-d", statistics_dir, "-t", str(start_time.timestamp())]).communicate()
    if no_olsr:
        plot_cmd = ["python3", "{}/plot_statistics.py".format(path), "-d", statistics_dir, '-O']
    else:
        plot_cmd = ["python3", "{}/plot_statistics.py".format(path), "-d", statistics_dir]
    subprocess.Popen(plot_cmd).communicate()
    os.system("chown -R wifi {}".format(path + '/data/statistics/'))
Example #40
0
    def startServer(self, server):
        return makeTerm(node=server,
                        cmd="python {} --id {} --servers {}".format(
                            self.path_to_server_code,
                            server.IP().replace('10.1.0.', ''),
                            ','.join(self.server_IPs)))
Example #41
0
def execInXterm(nodes, script):
    for node in nodes:
        makeTerm(node, title=str(node)+' '+ script , term='xterm', display=None,
            cmd='bash -c '+ script)
Example #42
0
    def startServer(self, server, nbOfServers):
        # Call mininet.term.makeTerm
        makeTerm(node=server,
                 cmd="python server/server.py %s %d" %
                 (server.IP().replace("10.1.0.", ""), nbOfServers))
Example #43
0
def myNet():

    ctl = '192.168.1.101'
    net = Mininet(topo=None, link=TCLink, build=False)

    # Create nodes
    h1 = net.addHost('h1', mac='01:00:00:00:01:00', ip='192.168.0.1/24')
    h2 = net.addHost('h2', mac='01:00:00:00:02:00', ip='192.168.0.2/24')
    h3 = net.addHost('h3', mac='01:00:00:00:03:00', ip='192.168.0.3/24')

    # Create switches
    s1 = net.addSwitch('s1', listenPort=6634, dpid='0000000000000010')
    s2 = net.addSwitch('s2', listenPort=6634, dpid='0000000000000020')
    s3 = net.addSwitch('s3', listenPort=6634, dpid='0000000000000030')

    print "*** Creating links"
    net.addLink(
        h1,
        s1,
    )
    net.addLink(
        h2,
        s2,
    )
    net.addLink(
        h3,
        s3,
    )
    net.addLink(
        s1,
        s2,
    )
    net.addLink(
        s1,
        s3,
    )

    # Add Controllers
    ctrl = net.addController('c1',
                             controller=RemoteController,
                             ip=ctl,
                             port=6633)

    net.build()

    # Connect each switch to a different controller
    s1.start([ctrl])
    s2.start([ctrl])
    s3.start([ctrl])

    print "Testing network connectivity\n"
    net.pingAll()
    print "Dumping host connections\n"
    dumpNodeConnections(net.hosts)

    h1, h2, h3, s1, s2, s3 = net.getNodeByName('h1', 'h2', 'h3', 's1', 's2',
                                               's3')

    s1.cmdPrint('ovs-vsctl show')

    h1.cmd('/sbin/tc qdisc del dev h1-eth0 root')
    sleep(3)
    h1.cmd('ifconfig h1-eth0 txqueuelen 10000')
    sleep(2)
    """
    to shape data at    9.6kbps -> 76800bit -> 76.8kbit
                        4.8kbps -> 38400bit -> 38.4kbit
                        2.4kbps -> 19200bit -> 19.2kbit
                        1.2kbps -> 9600bit  -> 9.6kbit
                        0.6kbps -> 4800bit  -> 4.8kbit
                        240kbps -> 1920000bit -> 1920kbit
                        250kbps -> 2000000bit -> 2000kbit
    """
    h1.cmd(
        '/sbin/tc qdisc add dev h1-eth0 root handle 1: htb default 11 && '
        '/sbin/tc class add dev h1-eth0 parent 1: classid 1:1 htb rate 250kbit ceil 250kbit burst 250kb && '
        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:11 htb rate 0.6kbit ceil 0.6kbit burst 10kb && '
        '/sbin/tc class add dev h1-eth0 parent 1:1 classid 1:12 htb rate 15kbit ceil 15kbit burst 240kb && '
        '/sbin/tc qdisc add dev h1-eth0 parent 1:11 handle 11: ets strict 2 quanta 900 600 priomap 3 3 2 1 0 1 1 1 1 1 1 1 1 1 1 1 && '
        '/sbin/tc qdisc add dev h1-eth0 parent 1:12 handle 12: ets strict 2 quanta 900 600 priomap 3 3 2 1 0 1 1 1 1 1 1 1 1 1 1 1 && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:1 handle 111: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:2 handle 112: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:3 handle 113: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 11:4 handle 114: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:1 handle 121: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:2 handle 122: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:3 handle 123: netem limit 1000 delay 5ms && '
        '/sbin/tc qdisc add dev h1-eth0 parent 12:4 handle 124: netem limit 1000 delay 5ms && '
        '/sbin/tc filter add dev h1-eth0 parent 1:0 protocol ip prio 1 u32 match ip src 192.168.0.1 match ip protocol 17 0xff flowid 1:1 && '
        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.2 match ip protocol 17 0xff flowid 1:11 && '
        '/sbin/tc filter add dev h1-eth0 parent 1:1 protocol ip prio 1 u32 match ip dst 192.168.0.3 match ip protocol 17 0xff flowid 1:12'
    )

    ssh_to_ctrl_1 = paramiko.SSHClient()
    ssh_to_ctrl_1.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_to_ctrl_1.connect(hostname=ctl,
                          username='******',
                          password='******',
                          allow_agent=False,
                          look_for_keys=False)
    stdin, stdout, stderr = ssh_to_ctrl_1.exec_command(
        'python ~/sdn-tactical-network/rest_app/qos_app/qos_rest_icmcis.py')
    print "STDOUT:\n%s\n\nSTDERR:\n%s\n" % (stdout.read(), stderr.read())
    CLI(net)
    sleep(3)
    makeTerm(h2,
             title='mgen receiver h2',
             cmd="mgen input receive_h2.mgn output receive_log_h2.txt")
    makeTerm(h3,
             title='mgen receiver h3',
             cmd="mgen input receive_h3.mgn output receive_log_h3.txt")
    makeTerm(h1,
             title='class statistics',
             cmd="watch -dc tc -s -d -j class show dev h1-eth0")
    makeTerm(h1,
             title='qdisc statistics',
             cmd="watch -dc tc -s -d qdisc show dev h1-eth0")
    makeTerm(h2,
             title='packet sniffer receiver h2',
             cmd="sudo python packet_sniffer_receiver_h2.py")
    makeTerm(h3,
             title='packet sniffer receiver h3',
             cmd="sudo python packet_sniffer_receiver_h3.py")
    makeTerm(h1,
             title='packet sniffer sender',
             cmd="sudo python packet_sniffer_sender.py")
    #makeTerm(h1, title='qdisc logger', cmd="python query_qdisc_log.py")
    sleep(1)
    # s1_s2_interface = s1.intf(intf='s1-eth2')
    # s1_s3_interface = s1.intf(intf='s1-eth3')
    # # target_bw_s1_to_s2 = 0.0006  # 0.6 kbps => 0.0006 Mbit/s
    # target_bw_s1_to_s2 = 0.0012  # 1.2 kbps => 0.0012 Mbit/s
    # # target_bw_s1_to_s2 = 0.0024  # 2.4 kbps => 0.0024 Mbit/s
    # # target_bw_s1_to_s2 = 0.0048  # 4.8 kbps => 0.0048 Mbit/s
    # # target_bw_s1_to_s2 = 0.0096  # 9.6 kbps => 0.0096 Mbit/s
    # # target_bw_s1_to_s3 = 0.015  # 15 kbps => 0.015 Mbit/s
    # target_bw_s1_to_s3 = 0.03  # 30 kbps => 0.03 Mbit/s
    # # target_bw_s1_to_s3 = 0.06  # 60 kbps => 0.06 Mbit/s
    # # target_bw_s1_to_s3 = 0.12  # 120 kbps => 0.12 Mbit/s
    # # target_bw_s1_to_s3 = 0.24  # 240 kbps => 0.24 Mbit/s
    # info("Setting BW Limit for Interface " + str(s1_s2_interface) + " to " + str(target_bw_s1_to_s2) + "\n")
    # info("Setting BW Limit for Interface " + str(s1_s3_interface) + " to " + str(target_bw_s1_to_s3) + "\n")
    # # change the bandwidth of link to target bandwidth
    # s1_s2_interface.config(bw=target_bw_s1_to_s2, smooth_change=True)
    # sleep(1)
    # s1_s3_interface.config(bw=target_bw_s1_to_s3, smooth_change=True)
    sleep(2)
    makeTerm(h1, title='mgen sender to h2', cmd="mgen input send.mgn")
    #makeTerm(h1, title='mgen sender to h3', cmd="mgen input send_h3.mgn")
    sleep(1)

    CLI(net)
    net.stop()
    os.system('sudo mn -c')
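
# Minimal entry-point sketch (assumption: the original script defines one after this point):
#
#   if __name__ == '__main__':
#       setLogLevel('info')
#       myNet()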
Example #44
0
def topology(nodes, rep):
    "Create a network."
    net = Mininet_wifi(controller=Controller, accessPoint=OVSKernelAP)

    info("*** Creating nodes\n")
    sta1 = net.addStation('sta1')
    sta2 = net.addStation('sta2')
    sta3 = net.addStation('sta3')
    sta4 = net.addStation('sta4')
    sta5 = net.addStation('sta5')
    sta6 = net.addStation('sta6')
    sta7 = net.addStation('sta7')
    sta8 = net.addStation('sta8')
    sta9 = net.addStation('sta9')
    sta10 = net.addStation('sta10')
    sta11 = net.addStation('sta11')
    sta12 = net.addStation('sta12')
    sta13 = net.addStation('sta13')
    sta14 = net.addStation('sta14')
    sta15 = net.addStation('sta15')
    sta16 = net.addStation('sta16')
    sta17 = net.addStation('sta17')
    sta18 = net.addStation('sta18')
    sta19 = net.addStation('sta19')
    sta20 = net.addStation('sta20')
    sta21 = net.addStation('sta21')
    sta22 = net.addStation('sta22')
    sta23 = net.addStation('sta23')
    sta24 = net.addStation('sta24')
    sta25 = net.addStation('sta25')

    h1 = net.addHost('h1')
    h2 = net.addHost('h2')

    ap1 = net.addAccessPoint('ap1', ssid="simplewifi", mode="g", channel="5")

    c0 = net.addController('c0', controller=Controller, ip='127.0.0.1',
                           port=6633)

    info("*** Configuring wifi nodes\n")
    net.configureWifiNodes()

    info("*** Associating Stations\n")
    net.addLink(sta1, ap1)
    net.addLink(sta2, ap1)
    net.addLink(sta3, ap1)
    net.addLink(sta4, ap1)
    net.addLink(sta5, ap1)
    net.addLink(sta6, ap1)
    net.addLink(sta7, ap1)
    net.addLink(sta8, ap1)
    net.addLink(sta9, ap1)
    net.addLink(sta10, ap1)
    net.addLink(sta11, ap1)
    net.addLink(sta12, ap1)
    net.addLink(sta13, ap1)
    net.addLink(sta14, ap1)
    net.addLink(sta15, ap1)
    net.addLink(sta16, ap1)
    net.addLink(sta17, ap1)
    net.addLink(sta18, ap1)
    net.addLink(sta19, ap1)
    net.addLink(sta20, ap1)
    net.addLink(sta21, ap1)
    net.addLink(sta22, ap1)
    net.addLink(sta23, ap1)
    net.addLink(sta24, ap1)
    net.addLink(sta25, ap1)

    net.addLink(h1, ap1)
    net.addLink(h2, ap1)

    info("*** Starting network\n")
    net.build()
    c0.start()
    ap1.start([c0])

    info("*** Ping All\n")
    net.pingAll()

    h1.cmd('sudo iperf -s -u -i 1 -t 30 > iperf_s_n' + nodes + '_r' + rep + ' &')
    h2.sendCmd('iperf -u -c ' + h1.IP() + ' -b 10M -i 1 -t 30 > iperf_c_n'+ nodes +'_r' + rep)
    # removing the SDN Controller
    ap1.cmd("ovs-vsctl --db=unix:/var/run/openvswitch/db.sock del-controller ap1")

    makeTerm( sta1, cmd="python /media/sf_shared/node.py 10.0.0.1 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta2, cmd="python /media/sf_shared/node.py 10.0.0.2 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta3, cmd="python /media/sf_shared/node.py 10.0.0.3 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta4, cmd="python /media/sf_shared/node.py 10.0.0.4 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta5, cmd="python /media/sf_shared/node.py 10.0.0.5 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta6, cmd="python /media/sf_shared/node.py 10.0.0.6 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta7, cmd="python /media/sf_shared/node.py 10.0.0.7 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta8, cmd="python /media/sf_shared/node.py 10.0.0.8 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta9, cmd="python /media/sf_shared/node.py 10.0.0.9 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta10, cmd="python /media/sf_shared/node.py 10.0.0.10 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta11, cmd="python /media/sf_shared/node.py 10.0.0.11 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta12, cmd="python /media/sf_shared/node.py 10.0.0.12 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta13, cmd="python /media/sf_shared/node.py 10.0.0.13 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta14, cmd="python /media/sf_shared/node.py 10.0.0.14 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta15, cmd="python /media/sf_shared/node.py 10.0.0.15 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta16, cmd="python /media/sf_shared/node.py 10.0.0.16 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta17, cmd="python /media/sf_shared/node.py 10.0.0.17 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta18, cmd="python /media/sf_shared/node.py 10.0.0.18 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta19, cmd="python /media/sf_shared/node.py 10.0.0.19 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta20, cmd="python /media/sf_shared/node.py 10.0.0.20 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta21, cmd="python /media/sf_shared/node.py 10.0.0.21 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta22, cmd="python /media/sf_shared/node.py 10.0.0.22 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta23, cmd="python /media/sf_shared/node.py 10.0.0.23 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta24, cmd="python /media/sf_shared/node.py 10.0.0.24 -nodes 25 -rep 1;sleep 2" )
    makeTerm( sta25, cmd="python /media/sf_shared/node.py 10.0.0.25 -nodes 25 -rep 1;sleep 2" )

    info("*** Waiting for iperf to terminate.\n")
    results = {}
    results[h2] = h2.waitOutput()
    h1.cmd('kill $!')

    # info("*** Running CLI\n")
    # CLI_wifi(net)

    info("*** Stopping network\n")
    net.stop()
Example #45
0
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.term import makeTerm

if '__main__' == __name__:

	net = Mininet(controller=RemoteController)
	c0 = net.addController('c0',ip='192.168.99.101', port=6633)

	s1 = net.addSwitch('s1')

	h1 = net.addHost('h1', mac='00:00:00:00:00:01')
	h2 = net.addHost('h2', mac='00:00:00:00:00:02')
	h3 = net.addHost('h3', mac='00:00:00:00:00:03')

	net.addLink(s1, h1)
	net.addLink(s1, h2)
	net.addLink(s1, h3)

	net.build()

	c0.start()
	s1.start([c0])

	net.terms.append(makeTerm(s1))

	CLI(net)
	net.stop()