def run_exercise():
    """Create the TLS exercise network, start its services, and wait.

    Builds the custom TLS topology, verifies connectivity, launches one
    nginx instance per server host plus wireshark and a student terminal,
    then blocks until the user presses Enter and tears everything down.
    """
    # Create and start a new network with our custom topology
    topo = TLSTopo()
    net = Mininet(topo=topo)
    net.start()
    net.pingAll()

    processes = []
    # Start one Nginx HTTP-server per server host. The five original
    # popen calls differed only in the host/config index, so launch them
    # in a loop instead of copy-pasting.
    for i in range(1, 6):
        processes.append(net["Server-%d" % i].popen(
            'nginx -p /home/vagrant/assignments/ssl_tls/nginx -c nginx_%d.conf' % i))
    # Open wireshark
    processes.append(net["A"].popen('wireshark'))
    # Open terminals
    processes.append(makeTerms([net["A"]], title="Student terminal")[0])

    raw_input("Press Enter to exit....")
    # Kill every child process before tearing the network down.
    for process in processes:
        process.kill()
    Cleanup.cleanup()
def test_main(module):
    """Test main."""
    setLogLevel('error')
    print('testing module %s' % module)
    parsed = parse_args()
    (requested_test_classes, clean, dumpfail, keep_logs, nocheck, serial,
     repeat, excluded_test_classes, report_json_filename, port_order) = parsed
    # --clean mode: remove leftovers from earlier runs and exit at once.
    if clean:
        print('Cleaning up test interfaces, processes and openvswitch '
              'configuration from previous test runs')
        Cleanup.cleanup()
        sys.exit(0)
    if nocheck:
        print('Skipping dependency checks')
    elif not check_dependencies():
        print('dependency check failed. check required library/binary '
              'list in header of this script')
        sys.exit(-1)
    print("port order: -o", ','.join(str(i) for i in port_order))
    hw_config = import_hw_config()
    run_tests(module, hw_config, requested_test_classes, dumpfail, keep_logs,
              serial, repeat, excluded_test_classes, report_json_filename,
              port_order)
def run_net_model_1():
    """Bring up the NetModel_1 topology, start UDP and TCP iperf servers on
    srv1/srv2, print their PIDs, and hand control to the Mininet CLI."""
    topo = NetModel_1_topo()
    netopts = dict(
        topo = topo,
        controller = RemoteController,  # expects an external OpenFlow controller
        link = TCLink,
        autoSetMacs = True,
        autoStaticArp = True,
        cleanup = True)
    print "***Starting NetModel_1 configuration..."
    net = Mininet (**netopts)
    net.start()
    srv1 = net.getNodeByName("srv1")
    srv2 = net.getNodeByName("srv2")
    print "***Starting UDP Server..."
    # Background each server, then read $! (the PID of the last
    # backgrounded job) from the same host shell.
    # NOTE(review): this assumes the two cmd() calls share one shell
    # session per host with nothing run in between — confirm.
    srv1.cmd('iperf --single_udp -s -u &')
    srv2.cmd('iperf --single_udp -s -u &')
    srv1_udp_pid = int( srv1.cmd('echo $!') )
    srv2_udp_pid = int( srv2.cmd('echo $!') )
    print "UDP Server started on srv1 with PID ", srv1_udp_pid
    print "UDP Server started on srv2 with PID ", srv2_udp_pid
    print "***Starting TCP Server..."
    srv1.cmd('iperf -s &')
    srv2.cmd('iperf -s &')
    srv1_tcp_pid = int(srv1.cmd('echo $!'))
    srv2_tcp_pid = int(srv2.cmd('echo $!'))
    print "TCP Server started on srv1 with PID ", srv1_tcp_pid
    print "TCP Server started on srv2 with PID ", srv2_tcp_pid
    print "Running CLI..."
    # Interactive session; returns when the user exits the CLI.
    CLI(net)
    print "Killing mininet..."
    cleaner.cleanup()
def run_exercise():
    """Create the TLS exercise network, start its services, and wait.

    Builds the custom TLS topology, verifies connectivity, launches one
    nginx instance per server host plus wireshark and a student terminal,
    then blocks until the user presses Enter and tears everything down.
    """
    # Create and start a new network with our custom topology
    topo = TLSTopo()
    net = Mininet(topo=topo)
    net.start()
    net.pingAll()

    processes = []
    # Start one Nginx HTTP-server per server host. The five original
    # popen calls differed only in the host/config index, so launch them
    # in a loop instead of copy-pasting.
    for i in range(1, 6):
        processes.append(net["Server-%d" % i].popen(
            'nginx -p /home/vagrant/assignments/ssl_tls/nginx -c nginx_%d.conf' % i))
    # Open wireshark
    processes.append(net["A"].popen('wireshark'))
    # Open terminals
    processes.append(makeTerms([net["A"]], title="Student terminal")[0])

    raw_input("Press Enter to exit....")
    # Kill every child process before tearing the network down.
    for process in processes:
        process.kill()
    Cleanup.cleanup()
def test_main():
    """Test main."""
    setLogLevel('error')
    (requested_test_classes, clean, dumpfail, keep_logs,
     nocheck, serial, excluded_test_classes) = parse_args()
    # --clean mode: remove leftovers from earlier runs and exit at once.
    if clean:
        print('Cleaning up test interfaces, processes and openvswitch '
              'configuration from previous test runs')
        Cleanup.cleanup()
        sys.exit(0)
    if nocheck:
        print('Skipping dependencies/lint checks')
    else:
        # Run each prerequisite check in order; bail on the first failure.
        checks = (
            (check_dependencies,
             'dependency check failed. check required library/binary '
             'list in header of this script'),
            (lint_check, 'pylint must pass with no errors'),
        )
        for check, message in checks:
            if not check():
                print(message)
                sys.exit(-1)
    hw_config = None
    run_tests(
        hw_config, requested_test_classes, dumpfail, keep_logs, serial,
        excluded_test_classes)
def __init__(self):
    """Set up logging, clear stale Mininet state, and bring the network up."""
    setLogLevel('info')
    Cleanup.cleanup()        # clean up any running mininet network
    self.enable_sflow_rt()   # compile and run sflow-rt helper script

    def make_controller(name):
        # Every switch attaches to the local controller on the standard
        # OpenFlow port over TCP.
        return RemoteController(name, ip='127.0.0.1', port=6633,
                                protocol='tcp')

    self.net = Mininet(topo=TopoThree(), controller=make_controller)
    self.net.start()
def runMininet():
    """Build the assignment topology, run the configured transfer test,
    and tear the network down afterwards."""
    Cleanup.cleanup()  # clean up the Mininet before start

    # The bottleneck link properties can be changed via these parameters.
    myTopo = Assignment3Topo(bw_v=10, delay_v="10ms", loss_v=0)
    net = Mininet(topo=myTopo, link=TCLink)  # make Mininet instance
    net.start()

    # Grab the hosts and their IP addresses.
    hosts = net.hosts
    print("Starting test...")
    receiver, sender = hosts[0], hosts[1]
    recvAddr = receiver.IP()
    sendAddr = sender.IP()

    # Test parameters come from the command line.
    windowSize = int(sys.argv[1])
    srcFilename = sys.argv[2]
    dstFilename = sys.argv[3]

    # Execute ping to establish the switching tables.
    # This call must not be removed.
    net.pingAll(timeout="30ms")

    # If you want to test with ping and iperf, uncomment this:
    # net.pingFull([receiver, sender])
    # net.iperf([receiver, sender], seconds=10)

    popens = {}
    # If your code is python, uncomment this:
    # popens[receiver] = receiver.popen('python3', 'receiver.py')
    # popens[sender] = sender.popen('python3', 'sender.py', recvAddr,
    #                               str(windowSize), srcFilename, dstFilename)

    deadline = time() + 180  # after 3 minutes the mininet test is shut down
    # pmonitor surfaces anything the sender or receiver prints.
    for h, line in pmonitor(popens, timeoutms=500):
        if h:
            print('<%s>: %s' % (h.name, line))
        if time() >= deadline:
            for p in popens.values():
                p.send_signal(SIGINT)
    net.stop()
    print("Testing fininshed")
def removeLost():
    """Tear down leftover Mininet/container state.

    Runs the standard Mininet cleanup, then stops (15 s grace) and
    force-removes every docker container whose listing matches 'mn'.
    """
    import os
    # Shell pipelines that collect the matching container ids inline.
    remove_cmd = "sudo docker rm -f $(sudo docker ps -a | grep mn | awk '{print $1}')"
    stop_cmd = "sudo docker stop -t 15 $(sudo docker ps -a | grep mn | awk '{print $1}')"
    # Removed the unused cmd2 ("sudo mn --wifi") — it was assigned but
    # never executed.
    info("Using mininet cleanup\n")
    Cleanup.cleanup()
    info("Stopping containers\n")
    os.system(stop_cmd)
    info("Removing containers\n")
    os.system(remove_cmd)
    info("IF cleanup was not sucessful. Reboot Machine!\n")
def performance_test(spread, depth, bandwidth, delay, loss, ping_all, iperf,
                     quick, log):
    """Create a generic tree network and run a simple performance test.

    Parameters mirror TreeTopoGeneric plus test switches: ping_all runs a
    full pingAll, iperf measures first/last host bandwidth, quick replaces
    every parameter with a canned fast configuration, and log selects the
    logging verbosity ('debug'/'warning'/'error'/'critical'; anything else
    means INFO).
    """
    if quick:
        # Canned "quick" configuration overriding all other parameters.
        spread = 3
        depth = 3
        bandwidth = 500
        delay = '0ms'
        loss = 0
        ping_all = True
        iperf = True
        log = 'info'
    Cleanup.cleanup()
    setLogLevel(log)
    logger = logging.getLogger(__name__)
    # Map the textual level onto the logging constant; unknown values fall
    # back to INFO exactly as the original if/elif chain did.
    levels = {
        'debug': logging.DEBUG,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }
    level = levels.get(log, logging.INFO)
    logger.setLevel(level)
    setup_logging(default_level=level)
    # Create network and run simple performance test
    topo = TreeTopoGeneric(spread, depth, bandwidth, delay, loss)
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
                  autoStaticArp=True)
    net.start()
    logger.info("Dumping host connections")
    dumpNodeConnections(net.hosts)
    if ping_all:
        logger.info("Running ping test between all hosts")
        net.pingAll()
    if iperf:
        logger.info("Testing bandwidth between first and last hosts")
        net.iperf()
    net.stop()
def test_main(modules, serial_override=None):
    """Test main."""
    print('testing module %s' % modules)
    (requested_test_classes, regex_test_classes, clean, dumpfail, debug,
     keep_logs, nocheck, serial, repeat, excluded_test_classes,
     report_json_filename, port_order, start_port, loglevel,
     profile) = parse_args()
    if serial_override is not None:
        print('overriding serial to ', serial_override)
        serial = serial_override
    setLogLevel(loglevel)
    # --clean mode: remove leftovers from earlier runs and exit at once.
    if clean:
        print('Cleaning up test interfaces, processes and openvswitch '
              'configuration from previous test runs')
        Cleanup.cleanup()
        sys.exit(0)
    if nocheck:
        print('Skipping dependency checks')
    elif not check_dependencies():
        print('dependency check failed. check required library/binary '
              'list in header of this script')
        sys.exit(-1)
    print('port order: -o', ','.join(str(i) for i in port_order))
    print('start port: --port %s' % start_port)
    hw_config = import_hw_config()
    profiler = None
    if profile:
        # Profile with wall clock time rather than CPU time.
        profiler = cProfile.Profile(time.time)
        profiler.enable()
    run_tests(modules, hw_config, requested_test_classes, regex_test_classes,
              dumpfail, debug, keep_logs, serial, repeat,
              excluded_test_classes, report_json_filename, port_order,
              start_port)
    if profiler is not None:
        profiler.disable()
        # Report the hottest call paths by cumulative time.
        pstats.Stats(profiler).sort_stats('cumulative').print_stats()
def deployTopo(self):
    """Start the Mininet topology, push the switch/host/SF/ODL
    configuration, deploy the service-function chain, hand control to the
    interactive CLI, and undo everything when the user exits."""
    self.topo = self.net.start()
    time.sleep(1)
    # clean previous rules
    self.odl.clean(self.callBackConfs['chain'], self.callBackConfs['sw'])
    time.sleep(1)
    # self.odl.post(self.odl.controller, self.odl.DEFAULT_PORT, self.odl.DISABLE_STATISTICS, self.odl.disableStatistics(), True)
    # Push each layer of configuration in dependency order.
    self.deploySwConf()
    self.deployHostConf()
    self.deploySfConf()
    self.deployODLConf()
    # deploy chain
    self.deploySfc()
    self.disableOffLoadFromIfaces()
    # Block in the CLI until the user exits, then tear down the rules.
    CLI(self.net)
    self.odl.clean(self.callBackConfs['chain'], self.callBackConfs['sw'])
    self.cleanProcess()
    # NOTE(review): Cleanup() only constructs the helper object; if a full
    # Mininet cleanup is intended this should likely be Cleanup.cleanup()
    # — confirm against the Cleanup class used here.
    Cleanup()
def after_scenario(context, scenario):
    """Behave hook: tear down whatever infrastructure the scenario used.

    OpenStack runs (context.openStackTest truthy) destroy the deployment
    via context.tf when it is marked safe to destroy, otherwise ask the
    operator to do it manually. Mininet runs withdraw any ONOS intents
    that were installed and clean up the Mininet state.
    """
    if context.openStackTest:
        # OpenStack part (OPENSTACK=True)
        if context.tf.readyToDestroy:
            context.tf.destroy()
        else:
            print("Please destroy infrastructure manually.")
    else:
        # Mininet & Controller part (OPENSTACK=False)
        if context.onosFlag:
            context.onosRest.removeOnosIntents()
            context.onosFlag = False  # intents only need removing once
        Cleanup.cleanup()
def run_all_tests_sequentially():
    """Sweep every delay/bandwidth/loss/payload combination with the MQ
    binaries, skipping the first ten runs, then clean up Mininet state."""
    run_index = 0
    for delay in DELAYS:
        for bw in BANDWIDTHS:
            for loss in LOSSES:
                for pl in PAYLOADS:
                    index = run_index
                    run_index += 1
                    # The first ten combinations are skipped.
                    if index < 10:
                        continue
                    performance_test(
                        "./TcpMQ/mqsub/mqsub -h=192.168.0.3:1883",
                        "./TcpMQ/mqpub/mqpub -h=192.168.0.3:1883",
                        "./TcpMQ/surgemq/surgemq",
                        index, RUN_TIME, pl,
                        delay=str(delay) + "ms", bw=bw, loss=loss)
    Cleanup.cleanup()
def run_all_tests_sequentially():
    """Run the full delay/bandwidth/loss sweep once per protocol.

    The QUIC and TCP sweeps were byte-identical apart from the protocol
    name passed to performance_test, so the duplicated triple loop is
    folded into a single parameterised pass per protocol (QUIC first,
    then TCP, preserving the original order). Mininet state is cleaned
    after every individual test, as before.
    """
    for protocol in ('quic', 'tcp'):
        for delay in DELAYS:
            for bw in BANDWIDTHS:
                for loss in LOSSES:
                    performance_test(protocol, delay=str(delay) + "ms",
                                     bw=bw, loss=loss)
                    Cleanup.cleanup()
def test_main(module):
    """Test main."""
    setLogLevel('error')
    print('testing module %s' % module)
    parsed = parse_args()
    (requested_test_classes, clean, dumpfail, keep_logs,
     nocheck, serial, excluded_test_classes, report_json_filename) = parsed
    # --clean mode: remove leftovers from earlier runs and exit at once.
    if clean:
        print('Cleaning up test interfaces, processes and openvswitch '
              'configuration from previous test runs')
        Cleanup.cleanup()
        sys.exit(0)
    if nocheck:
        print('Skipping dependency checks')
    elif not check_dependencies():
        print('dependency check failed. check required library/binary '
              'list in header of this script')
        sys.exit(-1)
    hw_config = import_hw_config()
    run_tests(module, hw_config, requested_test_classes, dumpfail,
              keep_logs, serial, excluded_test_classes,
              report_json_filename)
def perfTest():
    """Build the custom test network, check connectivity, and measure UDP
    bandwidth from h02 to a different-pod (h01) and same-pod (h31) server."""
    topo = MyTopo()
    net = Mininet(topo=topo, link=TCLink, controller=None)
    net.addController('mycontroller', controller=RemoteController,
                      ip='127.0.0.1')
    net.start()
    print('Dumping host connections')
    dumpNodeConnections(net.hosts)
    print('Testing network connectivity')
    net.pingFull()
    print('Testing bandwidth between pods')
    h01, h02, h31 = net.get('h01', 'h02', 'h31')
    # iperf server1 in pod 0
    h01.popen('iperf -s -u -i 1 > diff_pod', shell=True)
    # iperf server2 in pod 3
    h31.popen('iperf -s -u -i 1 > same_pod', shell=True)
    # iperf client send to server1 & server2
    h02.cmdPrint('iperf -c ' + h01.IP() + ' -u -t 10 -i 1 -b 100m')
    h02.cmdPrint('iperf -c ' + h31.IP() + ' -u -t 10 -i 1 -b 100m')
    net.stop()
    # Bug fix: the original called Cleanup(), which merely constructs the
    # helper object without cleaning anything. Cleanup.cleanup() performs
    # the actual teardown and matches usage elsewhere in this file.
    Cleanup.cleanup()
def sigint_handler(signum, frame):
    """SIGINT handler: stop the module-level Mininet network, clean up any
    leftover Mininet state, and exit the process.

    signum and frame are the standard signal-handler arguments; neither is
    used here.
    """
    global net
    net.stop()
    Cleanup.cleanup()
    sys.exit()
#h7.cmd('ethtool -s h7-eth0 speed 1 duplex full autoneg off'); #h8.cmd('ethtool -s h8-eth0 speed 1 duplex full autoneg off'); #h9.cmd('ethtool -s h9-eth0 speed 1 duplex full autoneg off'); #h10.cmd('ethtool -s h10-eth0 speed 1 duplex full autoneg off'); info('*** Configure switches (Open vSwitch w/ OpenFlow13)\n'); #s1.cmd('ifconfig s1 10.0.0.64'); #s2.cmd('ifconfig s2 10.0.0.65'); #s3.cmd('ifconfig s3 10.0.0.66'); #s4.cmd('ifconfig s4 10.0.0.67'); #s5.cmd('ifconfig s5 10.0.0.68'); #s1.cmd('ethtool -s s1 speed 1 duplex full autoneg off'); #s2.cmd('ethtool -s s2 speed 1 duplex full autoneg off'); #s3.cmd('ethtool -s s3 speed 1 duplex full autoneg off'); #s4.cmd('ethtool -s s4 speed 1 duplex full autoneg off'); #s5.cmd('ethtool -s s5 speed 1 duplex full autoneg off'); #s1.cmd('ovs-vsctl set bridge s1 stp-enable=true rstp-enable=true'); #s2.cmd('ovs-vsctl set bridge s2 stp-enable=true rstp-enable=true'); #s3.cmd('ovs-vsctl set bridge s3 stp-enable=true rstp-enable=true'); #s4.cmd('ovs-vsctl set bridge s4 stp-enable=true rstp-enable=true'); #s5.cmd('ovs-vsctl set bridge s5 stp-enable=true rstp-enable=true'); info('*** Switching to CLI\n'); #CLI(net); info('*** Stopping Network\n'); net.stop(); info('*** Cleaning Up\n'); Cleanup.cleanup();
average = 0 for f in outfiles.values(): start_flag = False bw = 0.0 with open(f, 'r') as o: for line in o: if start_flag == True: a = line.rfind('/sec') if a < 0: continue a = line[0:a].rfind(' ') b = line[0:a].rfind(' ') bw = float(line[b+1:a]) if 'Bandwidth' in line: start_flag = True print(f, bw) average += bw average = float(average) / len(outfiles.values()) print ('average bandwidth between all pairs = '+str(average)) # output arq = open('average1flow-ECMP.dat','w') arq.write('1 flow - ECMP - 20 servers average: ' + str(average) +'\n') arq.close() # CLI(net) net.stop() Cleanup.cleanup()
# are slow to get answered. ping_front1 = net.getNodeByName("h1").cmd("ping -c 1 -w 60 10.0.2.100") if re.search('1 received', ping_front1): successful_pings += 1 ping_front2 = net.getNodeByName("h2").cmd("ping -c 1 -w 60 10.0.2.100") if re.search('1 received', ping_front2): successful_pings += 1 return ((2 - successful_pings) / 2.0) * 100.0 if os.getenv("SUDO_USER") == None: print "This program need 'sudo'" exit() # Clean up from the last disaster Cleanup() for proc in psutil.process_iter(): if proc.name() == "frenetic.native" or proc.name() == "openflow.native": proc.kill() # Test Suite pingall_test("quick_start", "repeater.py", topo=SingleSwitchTopo(2)) pingall_test("netkat_principles", "repeater2.py", topo=SingleSwitchTopo(2)) pingall_test("netkat_principles", "repeater3.py", expect_pct=100) pingall_test("netkat_principles", "repeater4.py") pingall_test("netkat_principles", "repeater5.py") pingall_test("l2_learning_switch", "learning1.py") pingall_test("l2_learning_switch", "learning2.py", expect_pct=100) pingall_test("l2_learning_switch", "learning3.py") pingall_test("l2_learning_switch", "learning4.py") pingall_test("handling_vlans", "vlan1.py", expect_pct=66)
def mplexExperiment(
    n = 5,
    inc_function = None,
    udp = False,
    pdrop_min = 0.0,
    pdrop_max = 1.0,
    pdrop_stride = 0.1,
    playback_file = "../../data/pcaps/bigFlows.pcap",
    playback = False,
    linkopts = {"delay": 20},
    base_iperf_port = 5201,
    csv_dir = None,
    result_interval = 1.0,
    max_pdrop = 0.4,
    step_interval = 3.0,
    n_steps = 10,
    force_cmd_routes = False,
):
    """Run a multiplexing experiment on a star topology (one server switch,
    n hosts), ramping an OpenFlow probabilistic-drop action on the upstream
    route while each host iperf3s to the server; returns per-interval
    [bw, end_time, Mbit/s] rows parsed from iperf3's JSON output.

    NOTE(review): pdrop_min/pdrop_max/pdrop_stride, playback_file, playback
    and csv_dir are accepted but never read below; flow_pdrop_msg,
    enactActions and fxrange are built/defined but unused here — they may
    be used by callers of a larger variant. Confirm before removing.
    """
    if inc_function is None:
        # Default per-host bandwidth schedule: host i gets (i+1) Mbit/s.
        inc_function = lambda last, i, n: float(i+1)
    # helpers
    # One-element lists act as mutable cells so the nested closures can
    # update these counters (Python 2 has no `nonlocal`).
    initd_host_count = [0]
    initd_switch_count = [0]
    next_ip = [1]
    def newNamedHost(**kw_args):
        # Add the next sequentially-named host (h0, h1, ...).
        o = net.addHost("h{}".format(initd_host_count[0]), **kw_args)
        initd_host_count[0] += 1
        return o
    def newNamedSwitch(**kw_args):
        # Add the next switch with a predictable OpenFlow listen port.
        o = net.addSwitch("s{}".format(initd_switch_count[0]),
                          listenPort=7000+initd_switch_count[0], **kw_args)
        initd_switch_count[0] += 1
        return o
    def assignIP(node):
        # Hand out 10.0.0.x/24 addresses in arrival order.
        node.setIP("10.0.0.{}".format(next_ip[0]), 24)
        next_ip[0] += 1
    def trackedLink(src, target, extras=None):
        if extras is None:
            extras = linkopts
        l = net.addLink(src, target, **extras)
        return l
    route_commands = [[]]   # queued (switch, cmd_list, msg) until net is up
    switch_sockets = [{}]   # open control-channel sockets keyed by switch name
    def openSwitchSocket(switch):
        # (Re)open the raw OpenFlow control socket to this switch,
        # replacing any stale one, and complete the OFPT_HELLO exchange.
        if switch.name in switch_sockets[0]:
            killsock(switch_sockets[0][switch.name])
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", switch.listenPort))
        s.sendall(ofpb.ofp_hello(None, None))
        ofpp.parse(s.recv(8))
        switch.control_socket = s
        switch_sockets[0][switch.name] = s
        return s
    def killsock(s):
        # Best-effort shutdown; the socket may already be dead.
        try:
            s.shutdown(socket.SHUT_RDWR)
        except:
            pass
        s.close()
    def removeAllSockets():
        for _, sock in switch_sockets[0].viewitems():
            killsock(sock)
        switch_sockets[0] = {}
    def updateOneRoute(switch, cmd_list, msg):
        # Either shell out to ovs-ofctl (cmd_list) or push the raw
        # OpenFlow message over the control socket.
        if force_cmd_routes or not switch.listenPort:
            switch.cmd(*cmd_list)
        else:
            s = (switch_sockets[0][switch.name]
                 if switch.name in switch_sockets[0]
                 else openSwitchSocket(switch)
            )
            # seems like pipes can randomly break. oops!
            sent = False
            while not sent:
                try:
                    s.sendall(msg)
                    sent = True
                except:
                    s = openSwitchSocket(switch)
    def executeRouteQueue():
        # Flush the commands queued while the network was down.
        for el in route_commands[0]:
            updateOneRoute(*el)
        route_commands[0] = []
    # Downstream should be computed similarly, but without the probdrops or
    # anything like that.
    flow_pdrop_msg = ofpb.ofp_flow_mod(
        None, 0, 0, 0, ofp.OFPFC_ADD, 0, 0, 1, None, None, None, 0, 1,
        ofpb.ofp_match(None, None, None),
        ofpb.ofp_instruction_actions(ofp.OFPIT_WRITE_ACTIONS, None, [
            # Looks like 29 is the number I picked for Pdrop.
            ofpb._pack("HHI", 29, 8, 0xffffffff),
            ofpb.ofp_action_output(None, 16, 1, 65535)
        ])
    )
    def updateUpstreamRoute(switch, out_port=1, ac_prob=0.0, ip="10.0.0.1"):
        # Install/replace the upstream flow toward `ip`, dropping packets
        # with probability ac_prob via the custom "probdrop" action.
        # Turn from prob_drop into prob_send!
        prob = 1 - ac_prob
        p_drop_num = pdrop(prob)
        #p_drop = "" if ac_prob == 0.0 else "probdrop:{},".format(p_drop_num)
        p_drop = "probdrop:{},".format(p_drop_num)
        cmd_tail = [
            "in_port=*,ip,nw_dst={},actions={}\"{}-eth{}\"".format(
                ip, p_drop, switch.name, out_port)
        ]
        msg = ofpb.ofp_flow_mod(
            None, 0, 0, 0, ofp.OFPFC_ADD, 0, 0, 1, None, None, None, 0, 1,
            ofpb.ofp_match(ofp.OFPMT_OXM, None, [
                # Need to match Dest IP against curr host's.
                ofpm.build(None, ofp.OFPXMT_OFB_ETH_TYPE, False, 0, 0x0800, None),
                ofpm.build(None, ofp.OFPXMT_OFB_IPV4_DST, False, 0,
                           socket.inet_aton(ip), None)
            ]),
            ofpb.ofp_instruction_actions(ofp.OFPIT_WRITE_ACTIONS, None, [
                # Looks like 29 is the number I picked for Pdrop.
                ofpb._pack("HHI", 29, 8, p_drop_num),
                ofpb.ofp_action_output(None, 16, out_port, 65535)
            ])
        )
        switchRouteCommand(switch, cmd_tail, msg)
    def updateDownstreamRoute(switch, host, out_port):
        # Plain forwarding flow toward the host — no drop action.
        ip = host.IP()
        cmd_tail = [
            "in_port=*,ip,nw_dst={},actions=\"{}-eth{}\"".format(
                ip,switch.name, out_port)
        ]
        msg = ofpb.ofp_flow_mod(
            None, 0, 0, 0, ofp.OFPFC_ADD, 0, 0, 1, None, None, None, 0, 1,
            ofpb.ofp_match(ofp.OFPMT_OXM, None, [
                # Need to match Dest IP against curr host's.
                ofpm.build(None, ofp.OFPXMT_OFB_ETH_TYPE, False, 0, 0x0800, None),
                ofpm.build(None, ofp.OFPXMT_OFB_IPV4_DST, False, 0,
                           socket.inet_aton(ip), None)
            ]),
            ofpb.ofp_instruction_actions(ofp.OFPIT_WRITE_ACTIONS, None, [
                ofpb.ofp_action_output(None, 16, out_port, 65535)
            ])
        )
        switchRouteCommand(switch, cmd_tail, msg)
    def floodRoute(switch):
        # Catch-all flood rule (used for ARP etc. before specific flows hit).
        cmd_tail = [
            "actions=flood"
        ]
        msg = ofpb.ofp_flow_mod(
            None, 0, 0, 0, ofp.OFPFC_ADD, 0, 0, 1, None, None, None, 0, 1,
            ofpb.ofp_match(None, None, None),
            ofpb.ofp_instruction_actions(ofp.OFPIT_WRITE_ACTIONS, None, [
                # Looks like 29 is the number I picked for Pdrop.
                ofpb.ofp_action_output(None, 16, ofp.OFPP_FLOOD, 65535)
            ])
        )
        switchRouteCommand(switch, cmd_tail, msg)
    def switchRouteCommand(switch, cmd_tail, msg):
        # Build the ovs-ofctl fallback command and either apply the route
        # now (network alive) or queue it for executeRouteQueue().
        name = switch.name
        if not switch.listenPort:
            listenAddr = "unix:/tmp/{}.listen".format(switch.name)
        else:
            listenAddr = "tcp:127.0.0.1:{}".format(switch.listenPort)
        cmd_list = [
            "ovs-ofctl", "add-flow", listenAddr
        ] + cmd_tail
        if alive:
            updateOneRoute(switch, cmd_list, msg)
        else:
            route_commands[0].append((switch, cmd_list, msg))
    def enactActions(learners, sarsas):
        # Apply each learner's last chosen action as its drop probability.
        for (node, sarsa) in zip(learners, sarsas):
            (_, action, _) = sarsa.last_act
            updateUpstreamRoute(node, ac_prob=action)
    def buildNet(n_hosts):
        # Star topology: server -- switch -- n_hosts hosts, each host link
        # capped at the bandwidth produced by inc_function.
        server = newNamedHost()
        server_switch = newNamedSwitch()
        core_link = trackedLink(server, server_switch)
        assignIP(server)
        hosts = []
        last_bw = 0.0
        for i in xrange(n_hosts):
            h = newNamedHost()
            bw = inc_function(last_bw, i, n_hosts)
            last_bw = bw
            opts = linkopts
            opts["bw"] = bw
            trackedLink(server_switch, h, extras=opts)
            hosts.append((h, bw))
            assignIP(h)
            # Switch port 1 faces the server, so host i sits on port i+2.
            updateDownstreamRoute(server_switch, h, i+2)
        updateUpstreamRoute(server_switch)
        floodRoute(server_switch)
        return (server, core_link, server_switch, hosts)
    def pdrop(prob):
        # Scale a [0,1] send probability to the 32-bit value probdrop expects.
        return int(prob * 0xffffffff)
    def fxrange(start, stop, step):
        # Inclusive float range generator.
        curr = start
        while curr <= stop:
            yield curr
            curr += step
    ### THE EXPERIMENT?
    ###
    net = Mininet(link=TCLink)
    alive = False
    Cleanup.cleanup()
    (server, core_link, server_switch, hosts) = buildNet(n)
    net.start()
    alive = True
    executeRouteQueue()
    print "starting procs"
    # One iperf3 server per expected client, on consecutive ports.
    server_procs = [server.popen(["iperf3", "-s", "-p", str(base_iperf_port+x)],
                                 stdin=PIPE, stderr=sys.stderr)
                    for x in xrange(n)]
    # Each host drives traffic at its assigned bandwidth; -J gives JSON output.
    host_procs = [host.popen(
        ["iperf3", "-c", server.IP(), "-i", str(result_interval),
         "-p", str(base_iperf_port+i), "-b", "{}M".format(bw),
         "-t", str(32), "-J"] + (["-u"] if udp else []),
        stdin=PIPE, stdout=PIPE, stderr=sys.stderr)
        for i, (host, bw) in enumerate(hosts)
    ]
    #net.interact()
    time.sleep(0.5)
    # Ramp the upstream drop probability from 0 toward max_pdrop in n_steps.
    for step in xrange(n_steps):
        updateUpstreamRoute(server_switch,
                            ac_prob=max_pdrop * (step/float(n_steps)))
        print "pushing drop rate", max_pdrop * (step/float(n_steps)), \
            "at t =",0.5+step_interval*(step)
        time.sleep(step_interval)
    print "gathering stats..."
    updateUpstreamRoute(server_switch, ac_prob=0.0)
    # communicate() waits for each client to finish and collects its JSON.
    datas_pre = [proc.communicate() for proc in host_procs]
    datas = [json.loads(data[0]) for data in datas_pre]
    print "gathered"
    out_results = []
    for (_, bw), data in zip(hosts, datas):
        for inter in data["intervals"]:
            inner = inter["sum"]
            out_results.append([bw, inner["end"],
                                float(inner["bits_per_second"])/1e6])
        if udp:
            print "bw=", bw, "lost", data["end"]["sum"]["lost_percent"]
    print "output written, cleaning up..."
    removeAllSockets()
    for proc in host_procs + server_procs:
        try:
            proc.terminate()
        except:
            pass
    net.stop()
    return out_results
def run_exercise():
    """Create the DDoS exercise network, install static flows, start the
    DNS server, open the exercise terminals, and wait for the user."""
    # Create and start a new network with our custom topology
    topo = DDoSTopo()
    net = Mininet(topo=topo)
    net.start()

    # Configure switch so that packets reach the right port (to prevent l2
    # learning from affecting the exercise). For every ingress port 1-3 and
    # every destination host 10.0.0.1-3: forward out the host's own port
    # and mirror a VLAN-tagged copy (VLAN 11/12/13 encodes the ingress
    # port) out of port 4. The original nine add-flow calls differed only
    # in these indices, so install them in a loop.
    net["s1"].dpctl("del-flows")
    for in_port in range(1, 4):
        for host in range(1, 4):
            net["s1"].dpctl(
                "add-flow",
                "dl_type=0x0800,in_port=%d,nw_dst=10.0.0.%d,"
                "actions=output:%d,mod_vlan_vid:1%d,output:4"
                % (in_port, host, host, in_port))

    # Verify connectivity
    net.pingAll()

    processes = []
    # Start BIND DNS-server
    processes.append(net["B"].popen(
        'named', '-g', '-c', '/home/vagrant/assignments/DNS/config/named.conf'))
    # Open terminals
    processes.append(makeTerms([net["A"]], title="Attacker terminal")[0])
    processes.append(makeTerms([net["D"]], title="Capture terminal")[0])

    raw_input("Press Enter to exit....")
    for process in processes:
        process.kill()
    Cleanup.cleanup()
def mininetPrepare(self):
    """Translate self.CONFIGURATION into a running-ready Mininet object.

    Creates hosts, switches, OVSes and controllers, starts a POX
    l2_learning controller when plain switches exist, wires every entry of
    CONFIGURATION.CONNECTIONS (switch-switch, host-switch, host-host and
    VNF/VM attachments via pre-existing bridge interfaces), builds the
    network, and finally assigns/flushes host IP addresses, creating dummy
    interfaces for host NICs that got no Mininet link.

    Returns 0 on success; -1 when a referenced VNF/VM interface is not in
    the interface mapping, -2 when an attachment target is not a known
    switch/OVS, -3 when a fully-specified link matches no known endpoint
    combination (self.STATUS mirrors the return value).
    """
    self.NET = Mininet(topo=None, build=False)
    # Clear any leftover Mininet state from a previous run.
    Cleanup().cleanup()
    # Register all configured hosts and switches with Mininet.
    for HOST in self.CONFIGURATION.MNHOSTS:
        HOST.ELEM = self.NET.addHost(HOST.ID)
        self.HOSTS[HOST.ID] = HOST
    for SWITCH in self.CONFIGURATION.MNSWITCHES:
        SWITCH.ELEM = self.NET.addSwitch(SWITCH.ID)
        self.SWITCHES[SWITCH.ID] = SWITCH
    if self.SWITCHES:
        # Plain switches need a controller: spawn POX with l2_learning and
        # register it as the single local controller.
        self.POX = Popen([
            'python',
            '/'.join(abspath(__file__).split('/')[:-2]) +
            '/OFCONTROLLERS/pox/pox.py',
            'forwarding.l2_learning'
        ], stdout=FNULL, stderr=STDOUT, preexec_fn=pre_exec)
        sleep(3)  # give POX time to come up before switches connect
        UNICTRL = MNController('UNICTRL', '127.0.0.1', 6633)
        UNICTRL.ELEM = self.NET.addController('UNICTRL',
                                              controller=RemoteController,
                                              ip='127.0.0.1', port=6633)
        self.CONTROLLERS['UNICTRL'] = UNICTRL
    for CONTROLLER in self.CONFIGURATION.MNCONTROLLER:
        CONTROLLER.ELEM = self.NET.addController(
            CONTROLLER.ID, controller=RemoteController,
            ip=CONTROLLER.IP, port=CONTROLLER.PORT)
        self.CONTROLLERS[CONTROLLER.ID] = CONTROLLER
    for OVS in self.CONFIGURATION.MNOVSES:
        OVS.ELEM = self.NET.addSwitch(OVS.ID)
        self.OVSSWITCHES[OVS.ID] = OVS
    # Map of VNF/VM interface IDs to their host-side virtual interfaces.
    ifacesData = self.interfacesMaping()
    for LINK in self.CONFIGURATION.CONNECTIONS:
        # Case 1: no interface on either side -> switch/OVS to switch/OVS.
        if not "IN/OUTIFACE" in LINK and not "OUT/INIFACE" in LINK:
            if LINK["IN/OUT"] in self.SWITCHES:
                Element01 = self.SWITCHES[LINK["IN/OUT"]]
            else:
                Element01 = self.OVSSWITCHES[LINK["IN/OUT"]]
            if LINK["OUT/IN"] in self.SWITCHES:
                Element02 = self.SWITCHES[LINK["OUT/IN"]]
            else:
                Element02 = self.OVSSWITCHES[LINK["OUT/IN"]]
            self.NET.addLink(Element01.ELEM, Element02.ELEM)
            continue
        # Case 2: interface only on the IN/OUT side.
        if "IN/OUTIFACE" in LINK and not "OUT/INIFACE" in LINK:
            if LINK["IN/OUT"] in self.VNFS or LINK["IN/OUT"] in self.VMS:
                # VNF/VM endpoint: locate its bridge interface by MAC and
                # graft it onto the target switch/OVS.
                if LINK["IN/OUT"] in self.VNFS:
                    EXTERNALLINKS = self.VNFS[LINK["IN/OUT"]].VM
                else:
                    EXTERNALLINKS = self.VMS[LINK["IN/OUT"]]
                for iface in EXTERNALLINKS.INTERFACES:
                    if iface["MAC"] == LINK["IN/OUTIFACE"]:
                        if iface["ID"] in ifacesData:
                            brName = iface["ID"]
                            virtualIface = ifacesData[iface["ID"]]
                            break
                        else:
                            self.STATUS = -1
                            return -1
                if LINK["OUT/IN"] in self.SWITCHES:
                    Intf(brName, node=self.SWITCHES[LINK["OUT/IN"]].ELEM)
                elif LINK["OUT/IN"] in self.OVSSWITCHES:
                    Intf(brName, node=self.OVSSWITCHES[LINK["OUT/IN"]].ELEM)
                else:
                    self.STATUS = -2
                    return -2
                continue
            else:
                # Host endpoint: link host to switch/OVS and force the
                # host-side interface MAC to the configured value.
                Element01 = self.HOSTS[LINK["IN/OUT"]]
                if LINK["OUT/IN"] in self.SWITCHES:
                    Element02 = self.SWITCHES[LINK["OUT/IN"]]
                else:
                    Element02 = self.OVSSWITCHES[LINK["OUT/IN"]]
                NodesLink = self.NET.addLink(Element01.ELEM, Element02.ELEM)
                HostIface = Element01.ELEM.intf(NodesLink.intf1)
                HostIface.setMAC(LINK["IN/OUTIFACE"])
                for iface in Element01.INTERFACES:
                    if iface["MAC"] == LINK["IN/OUTIFACE"]:
                        iface["ELEM"] = HostIface
                        break
                continue
        # Case 3: interface only on the OUT/IN side (mirror of case 2).
        if "OUT/INIFACE" in LINK and not "IN/OUTIFACE" in LINK:
            if LINK["OUT/IN"] in self.VNFS or LINK["OUT/IN"] in self.VMS:
                if LINK["OUT/IN"] in self.VNFS:
                    EXTERNALLINKS = self.VNFS[LINK["OUT/IN"]].VM
                else:
                    EXTERNALLINKS = self.VMS[LINK["OUT/IN"]]
                for iface in EXTERNALLINKS.INTERFACES:
                    if iface["MAC"] == LINK["OUT/INIFACE"]:
                        if iface["ID"] in ifacesData:
                            brName = iface["ID"]
                            virtualIface = ifacesData[iface["ID"]]
                            break
                        else:
                            self.STATUS = -1
                            return -1
                if LINK["IN/OUT"] in self.SWITCHES:
                    Intf(brName, node=self.SWITCHES[LINK["IN/OUT"]].ELEM)
                elif LINK["IN/OUT"] in self.OVSSWITCHES:
                    Intf(brName, node=self.OVSSWITCHES[LINK["IN/OUT"]].ELEM)
                else:
                    self.STATUS = -2
                    return -2
                continue
            else:
                if LINK["IN/OUT"] in self.SWITCHES:
                    Element01 = self.SWITCHES[LINK["IN/OUT"]]
                else:
                    Element01 = self.OVSSWITCHES[LINK["IN/OUT"]]
                Element02 = self.HOSTS[LINK["OUT/IN"]]
                NodesLink = self.NET.addLink(Element01.ELEM, Element02.ELEM)
                HostIface = Element01.ELEM.intf(NodesLink.intf2)
                HostIface.setMAC(LINK["OUT/INIFACE"])
                for iface in Element02.INTERFACES:
                    if iface["MAC"] == LINK["OUT/INIFACE"]:
                        iface["ELEM"] = HostIface
                        break
                continue
        # Case 4: interfaces on both sides.
        else:
            if LINK["IN/OUT"] in self.HOSTS and LINK[
                    "OUT/IN"] in self.HOSTS:
                # Direct host-to-host link; pin both interface MACs.
                Element01 = self.HOSTS[LINK["IN/OUT"]]
                Element02 = self.HOSTS[LINK["OUT/IN"]]
                NodesLink = self.NET.addLink(Element01.ELEM, Element02.ELEM)
                HostIface01 = Element01.ELEM.intf(NodesLink.intf1)
                # NOTE(review): intf2 is looked up on Element01, not
                # Element02 — looks like a copy-paste slip; confirm.
                HostIface02 = Element01.ELEM.intf(NodesLink.intf2)
                HostIface01.setMAC(LINK["IN/OUTIFACE"])
                HostIface02.setMAC(LINK["OUT/INIFACE"])
                for iface in Element01.INTERFACES:
                    if iface["MAC"] == LINK["IN/OUTIFACE"]:
                        iface["ELEM"] = HostIface01
                        break
                for iface in Element02.INTERFACES:
                    if iface["MAC"] == LINK["OUT/INIFACE"]:
                        iface["ELEM"] = HostIface02
                        break
                continue
            if LINK["IN/OUT"] in self.HOSTS:
                # Host to VNF/VM: attach the VNF/VM's virtual interface
                # directly to the host with the configured MAC.
                Element01 = self.HOSTS[LINK["IN/OUT"]]
                if LINK["OUT/IN"] in self.VNFS:
                    EXTERNALLINKS = self.VNFS[LINK["OUT/IN"]].VM
                else:
                    EXTERNALLINKS = self.VMS[LINK["OUT/IN"]]
                for iface in EXTERNALLINKS.INTERFACES:
                    if iface["MAC"] == LINK["OUT/INIFACE"]:
                        if iface["ID"] in ifacesData:
                            virtualIface = ifacesData[iface["ID"]]
                            break
                        else:
                            self.STATUS = -1
                            return -1
                HostIface = Intf(virtualIface, node=Element01.ELEM,
                                 mac=LINK["IN/OUTIFACE"])
                for iface in Element01.INTERFACES:
                    if iface["MAC"] == LINK["IN/OUTIFACE"]:
                        iface["ELEM"] = HostIface
                        break
                continue
            if LINK["OUT/IN"] in self.HOSTS:
                # VNF/VM to host (mirror of the previous case).
                if LINK["IN/OUT"] in self.VNFS:
                    EXTERNALLINKS = self.VNFS[LINK["IN/OUT"]].VM
                else:
                    EXTERNALLINKS = self.VMS[LINK["IN/OUT"]]
                for iface in EXTERNALLINKS.INTERFACES:
                    if iface["MAC"] == LINK["IN/OUTIFACE"]:
                        if iface["ID"] in ifacesData:
                            virtualIface = ifacesData[iface["ID"]]
                            break
                        else:
                            self.STATUS = -1
                            return -1
                Element02 = self.HOSTS[LINK["OUT/IN"]]
                HostIface = Intf(virtualIface, node=Element02.ELEM,
                                 mac=LINK["OUT/INIFACE"])
                for iface in Element02.INTERFACES:
                    if iface["MAC"] == LINK["OUT/INIFACE"]:
                        iface["ELEM"] = HostIface
                        break
                continue
            else:
                self.STATUS = -3
                return -3
    self.NET.build()
    # Post-build pass: set/flush IPs on linked interfaces; create dummy
    # interfaces for configured host NICs that got no Mininet link.
    for HOST in self.HOSTS:
        DummyIfaces = 0
        for IFACE in self.HOSTS[HOST].INTERFACES:
            if "ELEM" in IFACE:
                if IFACE["IP"] != None:
                    IFACE["ELEM"].setIP(IFACE["IP"])
                else:
                    # No IP configured: drop whatever address Mininet gave.
                    if IFACE["ELEM"].IP() != None:
                        self.HOSTS[HOST].ELEM.cmd(
                            "sudo ip addr flush " + str(IFACE["ELEM"].name))
            else:
                self.HOSTS[HOST].ELEM.cmd(
                    "sudo ip link add mn-eth" + str(DummyIfaces) +
                    " address " + IFACE["MAC"] + " type dummy")
                if IFACE["IP"] != None:
                    self.HOSTS[HOST].ELEM.cmd(
                        "sudo ip addr add " + IFACE["IP"] + " dev " +
                        "mn-eth" + str(DummyIfaces))
                self.HOSTS[HOST].ELEM.cmd(
                    "sudo ifconfig mn-eth" + str(DummyIfaces) + " up")
                DummyIfaces += 1
    return 0
# Top-level script body: build a linear chain of y switches with x hosts
# attached to each, start the network, drop into the CLI, then clean up.
# NOTE(review): `x` (hosts per switch) and `y` (number of switches) must be
# defined earlier in the file — confirm against the full script.
net = Mininet( controller=Controller , link=TCLink)
net.addController( 'c0' )
switches = []
count=1
ip1='10.0.1.'  # subnet for odd-numbered hosts (linked at bw=1)
ip2='10.0.2.'  # subnet for even-numbered hosts (linked at bw=2)
for i in range(y):
    s = net.addSwitch('s'+str(i+1))
    for j in range(x):
        ind=count
        # Alternate hosts between the two subnets / link bandwidths.
        if ind % 2 !=0:
            h = net.addHost( 'h'+str(count), ip=ip1+str(ind)+'/24')
            net.addLink( h, s, bw=1 )
        else:
            h = net.addHost( 'h'+str(count), ip=ip2+str(ind)+'/24')
            net.addLink( h, s , bw=2 )
        count= count+1
    switches.append(s)
# Chain neighbouring switches together to form the linear topology.
for i in range(y):
    if i < y-1:
        net.addLink(switches[i], switches[i+1], bw=2)
net.start()
CLI( net )
net.stop()
Cleanup.cleanup()
def run(hosts, host_config_path, sensor_config_path, grid_config_path,
        output_dir, resource_types, type_map, trecs_root_dir,
        time_limit=None, _loss=None):
    """Run the agents on a Mininet network.

    Parameters
    ----------
    hosts : iterable
        Iterable containing information about each host.
    host_config_path : path_like
        Path to host config file.
    sensor_config_path : path_like
        Path to sensor config file.
    grid_config_path : path_like
        Path to grid config file.
    output_dir : path_like
        Directory to which executables' output will be written.
    resource_types : set
        Set of resource types used in the current T-RECS run.
    type_map : dictionary
        A dictionary mapping resource names to types.
    trecs_root_dir : path_like
        Path of T-RECS root directory.
    time_limit : float (optional, default None)
        Maximum time to run the simulation for (in minutes).
    _loss : float (optional, default None)
        Network loss (as a fraction of packets).
    """
    relative_to_absolute_path_conversion(hosts, host_config_path)
    check_execs_and_required_files(hosts)
    prepare_run_directory(resource_types, trecs_root_dir)
    # Add grid container so that we can run the grid model.
    add_grid_host(hosts, grid_config_path, sensor_config_path,
                  trecs_root_dir, output_dir)
    # Add resource models to their corresponding resource agent hosts
    add_resource_models_to_ra_hosts(hosts, type_map, trecs_root_dir)
    # Clean the Mininet remains from last run, if any
    Cleanup.cleanup()
    net = Mininet(link=TCLink, xterms=True,
                  ipBase='{}/{}'.format(GLOBAL_IP, GLOBAL_PREFIX))
    net.addController('c0')
    router = None
    switch = None
    vhosts = []
    linkopts = dict(bw=100, delay='0ms', loss=_loss, use_htb=False)  # bw in Mbps
    LOGGER.info("Create the network")
    mapping_host_ip = {}
    for host, host_addr, router_addr, local_addr in zip(
            hosts, HOST_ADDRS, ROUTER_ADDRS, LOCAL_ADDRS):
        # The router and the switch are created lazily on the first iteration.
        if router is None:
            LOGGER.info("Add router to connect subnets and log traffic")
            router = net.addHost('router', ip=router_addr)
            LOGGER.info("Configure router")
            router.cmd('sysctl net.ipv4.ip_forward=1')
        if switch is None:
            LOGGER.info("Add switch to connect grid and RAs")
            switch = net.addSwitch('s0')
        LOGGER.info("Add host {} at {}".format(host['host_name'], host_addr))
        vhost = net.addHost(host['host_name'], ip=host_addr)
        vhosts.append(vhost)
        LOGGER.info("Add link router <-> {}".format(host['host_name']))
        link = net.addLink(router, vhost, **linkopts)
        router.setIP(router_addr, intf=link.intf1)
        vhost.setIP(host_addr, intf=link.intf2)
        # Add an entry to the router's routing table.
        router_ip = router_addr.split('/')[0]
        router.cmd('ip route add {} via {}'.format(network(host_addr),
                                                   router_ip))
        # Set the router as the host's default gateway.
        vhost.cmd('ip route add default via {}'.format(router_ip))
        # If the host is the grid container or an RA, connect it to the LAN.
        if host['host_type'] in ('RA', 'grid'):
            LOGGER.info("Add link switch <-> {}".format(host['host_name']))
            link = net.addLink(switch, vhost, **linkopts)
            vhost.setIP(local_addr, intf=link.intf2)
        mapping_host_ip[host['host_name']] = host_addr
    with open(path.join(trecs_root_dir, 'run', 'mapping_host_ip.json'),
              'w') as mapping_file:
        mapping_file.write(dumps(mapping_host_ip))
    LOGGER.info("Start the network")
    net.start()
    sleep(2)  # sleep for 2 seconds
    LOGGER.info("Run the executables")
    # LOGGER.info("Run Scapy on router")
    # exec_path = os.path.join(trecs_root_dir, 'run', 'router', 'sniff.py')
    # router.cmd('{} > {} 2>&1 &'.format(
    #     quote(exec_path),
    #     quote(os.path.join(
    #         output_dir, 'router', 'sniff.py') + '.out')))
    for host, vhost in zip(reversed(hosts), reversed(vhosts)):
        for exec_ in host['executables']:
            path_, exec_name = path.split(exec_['executable_path'])
            LOGGER.info('Run {} on {}.'.format(exec_name, host['host_name']))
            cmd = './{} {} > {} 2>&1 &'.format(
                quote(exec_name),
                ' '.join(exec_['command_line_arguments']),
                quote(
                    path.join(output_dir, host['host_name'], exec_name)
                    + '.out'))
            vhost.cmd('cd {}'.format(path_))
            vhost.cmd(cmd)
    # Check if we should set a time limit.  NOTE(review): SIGALRM with no
    # installed handler terminates the process — presumably that is the
    # intended way to end the run; confirm a handler is set elsewhere.
    if time_limit is not None:
        alarm(int(60 * time_limit))
    LOGGER.info("Run the CLI")
    try:
        CLI(net)
    except RuntimeError as e:
        # Logger.warn() is a deprecated alias of warning().
        LOGGER.warning(str(e))
    finally:
        LOGGER.info("Stop the network")
        net.stop()
from mininet.clean import cleanup, killprocs, Cleanup from mininet.cli import CLI from mininet.log import info, setLogLevel from mininet.net import Containernet from mininet.node import Controller from pyftpdlib.authorizers import DummyAuthorizer from pyftpdlib.handlers import FTPHandler from pyftpdlib.servers import MultiprocessFTPServer from container.kali import Kali from controller import PoxController from typing import List, Tuple # Add a cleanup command to mininet.clean to clean pox controller Cleanup.addCleanupCallback(lambda: killprocs(PoxController.pox_comand)) class Scenario(object): """ Base scenario class. Performs the functions to run and document a scenario. Should be extended by any other implemented scenarios, all public methods are extensible. Extension classes should have the class name `Import` as per the `start` file. """ # These attributes allow for the filtering/ordering of scenarios when presenting them to students name = "Base Scenario" """Scenario name. Used in the produced documentation""" enabled = False """If the scenario should be shown to users.""" weight = -1
def clean_network(self):
    """Tear down leftover Mininet state (interfaces, processes, OVS config).

    cleanup() is a classmethod on mininet.clean.Cleanup, so no instance
    needs to be constructed — this also matches the bare
    ``Cleanup.cleanup()`` calls used elsewhere in this file.
    """
    Cleanup.cleanup()
# Create a new ovsdb namespace self.switches = [] name = 'ovsdb%d' % cls.ovsdbCount kwargs.update( inNamespace=True ) kwargs.setdefault( 'privateDirs', self.privateDirs ) super( OVSDB, self ).__init__( name, **kwargs ) ovsdb = cnet.addHost( name, cls=self.self, **kwargs ) link = cnet.addLink( ovsdb, cnet.switches[ 0 ] ) cnet.switches[ 0 ].attach( link.intf2 ) ovsdb.configDefault() ovsdb.setDefaultRoute( 'via %s' % self.nat.intfs[ 0 ].IP() ) ovsdb.startOVS() # Install cleanup callback Cleanup.addCleanupCallback( OVSDB.cleanUpOVS ) class OVSSwitchNS( OVSSwitch ): "OVS Switch in shared OVSNS namespace" isSetup = False @classmethod def batchStartup( cls, switches ): result = [] for ovsdb, switchGroup in groupby( switches, attrgetter( 'ovsdb') ): switchGroup = list( switchGroup ) info( '(%s)' % ovsdb ) result += OVSSwitch.batchStartup( switchGroup, run=ovsdb.cmd ) return result
from multiprocessing import Process import subprocess import shutil # test_duration = 6 * 60*60 # six hours test_duration = 24 * 60 * 60 # 10 min # if the script is run directly (sudo custom/optical.py): if __name__ == '__main__': start_logging() write_time('starting', start_empty=True) ##Clean up Cleanup.cleanup() db = Database() db.delete_content() ##Start net = MyNetwork(get_config('configs.json')) net.start() net.enable_netflow(net.switches, net.config['controller']['ip']) net.enable_sflow(net.switches, net.config['controller']['ip'], 256, 30) net.pingAll() # this is for the discovery net.pingAll() #just to make sure everythings working int_table = net.print_interface_id( ) #This will print and log the interface name and corresponding ifaceindex in SFLOW net.print_link_info() # This will print and log the connection information h1, h2, h3 = net.get('h1', 'h2', 'h3')