def run(self, sim_args, curtime, entrypoint):
    """Build a two-switch client/server Containernet, run *entrypoint* on
    both containers, and capture packets for the duration of the run.

    Configuration (images, params, log dirs, commits) is taken from
    environment variables; the process exits with status 1 if any is
    missing.  ``sim_args.k`` selects kernel/eBPF mode, which swaps which
    side runs in the foreground.  ``curtime`` is currently unused.
    """
    # BUG FIX: the guard previously listed 'SERVER' twice and omitted
    # 'SERVER_PARAMS', so environ['SERVER_PARAMS'] below could still raise
    # KeyError even when this check passed.
    required = ['CLIENT', 'CLIENT_PARAMS', 'SERVER', 'SERVER_PARAMS',
                'CLIENT_LOGS', 'SERVER_LOGS', 'CL_COMMIT', 'SV_COMMIT']
    if any(v not in environ for v in required):
        # TODO show help
        exit(1)
    client_image = environ['CLIENT']
    client_params = environ['CLIENT_PARAMS']
    server_image = environ['SERVER']
    server_params = environ['SERVER_PARAMS']
    cl_logdir = environ['CLIENT_LOGS']
    sv_logdir = environ['SERVER_LOGS']
    clcommit = environ['CL_COMMIT']
    svcommit = environ['SV_COMMIT']
    setLogLevel('info')
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController('c0')
    info('*** Adding docker containers\n')
    server_vs = [sv_logdir + ':/logs']
    # add kernel debug volume to allow eBPF code to run
    if sim_args.k:
        server_vs.append('/sys/kernel/debug:/sys/kernel/debug:ro')
    server = net.addDocker('server', ip='10.0.0.251',
                           environment={"ROLE": "server",
                                        "SERVER_PARAMS": server_params,
                                        "COMMIT": svcommit},
                           dimage=server_image + ":latest",
                           volumes=server_vs)
    client = net.addDocker('client', ip='10.0.0.252',
                           environment={"ROLE": "client",
                                        "CLIENT_PARAMS": client_params,
                                        "COMMIT": clcommit},
                           dimage=client_image + ":latest",
                           volumes=[cl_logdir + ':/logs'])
    info('*** Adding switches\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    info('*** Creating links\n')
    # The s1<->s2 link carries the shaped (delay/bw/queue) bottleneck.
    net.addLink(s1, s2, cls=TCLink, delay=sim_args.delay,
                bw=sim_args.bandwidth, max_queue_size=sim_args.queue)
    net.addLink(s1, client)
    net.addLink(s2, server)
    info('\n*** Updating and building client/server\n')
    server.cmd('./updateAndBuild.sh')
    client.cmd('./updateAndBuild.sh')
    info('*** Starting network\n')
    net.start()
    info('***Add reordering to server intf\n')
    # Reconfigure the netem qdisc on the server-side switch port to force
    # 100% packet reordering with the configured gap.
    net.get('s2').cmd('tc qdisc change dev s2-eth1 parent 5:1 netem delay '
                      + sim_args.delay + ' reorder 100% gap '
                      + str(sim_args.reordergap) + ' limit '
                      + str(sim_args.queue))
    capture = PacketCapture()
    # In kernel (eBPF) mode the server runs in the foreground below, so the
    # client is backgrounded here; otherwise the roles are swapped.
    if sim_args.k:
        client.cmd(entrypoint + " &")
    else:
        server.cmd(entrypoint + " &")
    capture.startCapture()
    info('\n' + entrypoint + '\n')
    if sim_args.k:
        info(server.cmd(entrypoint) + "\n")
    else:
        info(client.cmd(entrypoint) + "\n")
    # Wait some time to allow server finish writing to log file
    sleep(3)
    capture.stopCapture()
    info('*** Stopping network')
    net.stop()
def create(cls, docker0_ip='172.18.0.1'):
    """Alternate constructor: wrap a fresh Containernet (TC links, default
    controller 'c0') in an instance of this class."""
    backing_net = Containernet(controller=Controller, link=TCLink)
    log.info('create mininet wtih default controller')
    backing_net.addController('c0')
    return cls(backing_net, docker0_ip)
def _create_network(self):
    """Create the Containernet instance (TC links) and attach a default
    controller named 'c0'."""
    self.net = Containernet(controller=Controller, link=TCLink)
    self.net.addController('c0')
    # Lazy %-style args defer formatting until the record is actually
    # emitted (the original eagerly formatted with the % operator).
    logger.info("Created network: %r", self.net)
def run(self, sim_args, curtime, entrypoint):
    """Build a client/server Containernet behind a POX droplist controller,
    run *entrypoint* on both containers, and capture packets.

    Like the plain variant, configuration comes from environment
    variables; additionally ``sim_args.drops_to_client`` /
    ``drops_to_server`` feed the POX ``forwarding.droplist`` component.
    ``curtime`` is currently unused.
    """
    # BUG FIX: the guard previously listed 'SERVER' twice and omitted
    # 'SERVER_PARAMS', so environ['SERVER_PARAMS'] below could still raise
    # KeyError even when this check passed.
    required = ['CLIENT', 'CLIENT_PARAMS', 'SERVER', 'SERVER_PARAMS',
                'CLIENT_LOGS', 'SERVER_LOGS', 'CL_COMMIT', 'SV_COMMIT']
    if any(v not in environ for v in required):
        # TODO show help
        exit(1)
    client_image = environ['CLIENT']
    client_params = environ['CLIENT_PARAMS']
    server_image = environ['SERVER']
    server_params = environ['SERVER_PARAMS']
    cl_logdir = environ['CLIENT_LOGS']
    sv_logdir = environ['SERVER_LOGS']
    clcommit = environ['CL_COMMIT']
    svcommit = environ['SV_COMMIT']
    setLogLevel('info')
    net = Containernet(controller=POX)
    info('*** Adding controller\n')
    poxCommand = 'forwarding.droplist'
    # `is not None` instead of `!= None` (identity test for the singleton).
    if sim_args.drops_to_client is not None:
        poxCommand += ' --droplist_server=' + sim_args.drops_to_client
    if sim_args.drops_to_server is not None:
        poxCommand += ' --droplist_client=' + sim_args.drops_to_server
    # add custom controller to drop list of packets
    net.addController('c0', poxArgs=poxCommand)
    info('*** Adding docker containers\n')
    server_vs = [sv_logdir + ':/logs']
    # add kernel debug volume to allow eBPF code to run
    if sim_args.k:
        server_vs.append('/sys/kernel/debug:/sys/kernel/debug:ro')
    server = net.addDocker('server', ip='10.0.0.251',
                           environment={
                               "ROLE": "server",
                               "SERVER_PARAMS": server_params,
                               "COMMIT": svcommit
                           },
                           dimage=server_image + ":latest",
                           volumes=server_vs)
    # BUG FIX: the client volume spec was missing the ':' separator
    # (cl_logdir + '/logs'), so the host log directory was never
    # bind-mounted at /logs; use host:container syntax as for the server.
    client = net.addDocker('client', ip='10.0.0.252',
                           environment={
                               "ROLE": "client",
                               "CLIENT_PARAMS": client_params,
                               "COMMIT": clcommit
                           },
                           dimage=client_image + ":latest",
                           volumes=[cl_logdir + ':/logs'])
    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2', controller=OVSController)
    info('*** Creating links\n')
    # The s1<->s2 link carries the shaped (delay/bw/queue) bottleneck.
    net.addLink(s1, s2, cls=TCLink, delay=sim_args.delay,
                bw=sim_args.bandwidth, max_queue_size=sim_args.queue)
    net.addLink(s1, client)
    net.addLink(s2, server)
    info('\n*** Updating and building client/server\n')
    server.cmd('./updateAndBuild.sh')
    client.cmd('./updateAndBuild.sh')
    info('*** Starting network\n')
    net.start()
    capture = PacketCapture()
    # In kernel (eBPF) mode the server runs in the foreground below, so the
    # client is backgrounded here; otherwise the roles are swapped.
    if sim_args.k:
        client.cmd(entrypoint + " &")
    else:
        server.cmd(entrypoint + " &")
    capture.startCapture()
    info('\n' + entrypoint + '\n')
    if sim_args.k:
        info(server.cmd(entrypoint) + "\n")
    else:
        info(client.cmd(entrypoint) + "\n")
    # Wait some time to allow server finish writing to log file
    sleep(3)
    capture.stopCapture()
    info('*** Stopping network')
    net.stop()
def TreeContainerNet(depth=1, fanout=2, dimage="ubuntu:trusty", **kwargs):
    """Build a Containernet over a Docker-backed tree topology.

    Any extra keyword arguments are forwarded to the Containernet
    constructor.
    """
    tree = ContainerTreeTopo(depth, fanout, dimage)
    return Containernet(topo=tree, **kwargs)
from mininet.net import Mininet
from mininet.net import Containernet
from mininet.node import Controller, Docker, OVSSwitch
from mininet.cli import CLI

# Minimal star topology: three hosts (h2 is a Docker container running the
# gmiotto/click image) attached to a single switch, then drop to the CLI.
net = Containernet()  # net is a Containernet() object (a Mininet subclass)
h1 = net.addHost( 'h1' )  # h1 is a Host() object
h2 = net.addHost( 'h2' ,cls=Docker,dimage="gmiotto/click")  # h2 is a Docker() host
h3 = net.addHost( 'h3' )  # h3 is a Host() object
s1 = net.addSwitch( 's1' )  # s1 is a Switch() object
c0 = net.addController( 'c0' )  # c0 is a Controller()
# NOTE(review): h2 is linked to s1 twice — presumably intentional (gives the
# Click container two interfaces); confirm against the original intent.
net.addLink( h2, s1 )
net.addLink( h2, s1 )
net.addLink( h1, s1 )  # creates a Link() object
net.addLink( h3, s1 )
net.start()
#print h1.cmd( 'ping -c1', h2.IP() )
CLI( net )
net.stop()
def _create_network(self):
    """Create the Containernet instance and attach a default controller
    named 'c0'."""
    self.net = Containernet(controller=Controller)
    self.net.addController("c0")
    # Lazy %-style args defer formatting until the record is actually
    # emitted (the original eagerly formatted with the % operator).
    LOG.info("Created network: %r", self.net)
from mininet.node import OVSSwitch, Controller, RemoteController
from mininet.cli import CLI
from mininet.net import Containernet
from mininet.log import info, setLogLevel
import time
import os

# Topology skeleton: three hosts and one OpenFlow 1.3 switch managed by two
# remote controllers on localhost (ports 6653/6654).  Link creation is not
# present in this snippet — presumably it continues elsewhere; TODO confirm.
setLogLevel('info')
info('*** Create the controller \n')
#info(c0)
"Create Simple topology example."
net = Containernet(switch = OVSSwitch, build=False)
net.addController('c0', controller = RemoteController, ip = "127.0.0.1", port = 6653)
net.addController('c1', controller = RemoteController, ip = "127.0.0.1", port = 6654)
# Initialize topology
# Add containers
h1 = net.addHost('h1', ip='10.0.0.251')  # Client
h2 = net.addHost('h2', ip='10.0.0.252')  # Attacker
h3 = net.addHost('h3', ip='10.0.0.253')  # Victim
# Add switches
info('*** Adding switches\n')
sw1 = net.addSwitch('sw1', protocols='OpenFlow13')
# Add links
info('*** Creating links\n')
def topology(num=100, max_r=100, mode="g", xp=None, plot=False, ping=False, serf_conf=False, devmodel=None, sleep_test=-1, new_met=False):
    """Create a wireless-mesh Containernet of *num* Docker hosts.

    Either loads saved positions (module-level ``load_topo``) or generates
    random positions via ``prepareGraph``; optionally persists the topology
    (``save_topo``/``save_topo_file``), configures the mesh, seeds the
    nodes, and runs the selected experiment before dropping to the CLI.
    Relies on several module-level globals (load_topo, save_topo,
    save_topo_file, docker_image).  ``serf_conf`` is currently unused.
    """
    info("Creating a network with docker containers acting as hosts and wireless mesh network environment.\n")
    net = Containernet(controller=Controller)
    #info('*** Adding docker containers\n')
    ## In automated way will create Hosts
    dh = []
    # Fast path: restore a previously saved topology, run, and exit.
    if load_topo:
        net, dh, num = load_positions(net)
        net = start_network(net)
        seed_node = "10.0.0.254:5001"  ## seed will be always the first container!
        config_nodes(seed_node, dh)
        printTopo(net, num=num, plotgrp=plot, ping=ping)
        if not xp:
            info("> Not running automatic experience!\n")
        else:
            #TODO: for now there is only one!!!!!
            experiment1(net, dh, num=num, sleep_test=sleep_test, new_met=new_met)
        info('*** Running CLI\n')
        CLI(net)
        info('*** Stopping network')
        net.stop()
        sys.exit()  ## save(?) and exit
    #Using the new method to calculate positions
    info("*** Creating positions for nodes\n")
    listPos, G = prepareGraph(num, max_r)
    print("* Nodes positions created!")
    print("** Creating %d Station(s) " % (num))
    if save_topo:
        with open(save_topo_file, "w+") as tdh:
            #start writing things
            #NumOfHosts;[(posx,posy,posz),..];docker_image;device_model;range;ip?;other info
            tdh.write("### Topology for %d hosts\n" % (num))
            tdh.write("#NumOfHosts;[(posx,posy,posz);docker_image;device_model;range;ip?];other info\n")
            tdh.write("NumOfHosts=%d;\n" % num)
            tdh.flush()
    for x in range(0, num):
        # IPs are assigned downwards from 10.0.0.254 (the seed node).
        ip = (254 - x)
        posx, posy = getPosition(listPos)
        posz = 10
        r = max_r
        position = str(posx) + ',' + str(posy) + ',' + str(posz)
        ## This will create the hosts with image ubuntu:trusty, position and range of the device
        ## other information can be added later
        #dh.append("null")
        dh.append(net.addDocker('d' + str(x), cls=Docker, ip='10.0.0.' + str(ip), dimage=docker_image, position=position, range=r, mode=mode, equipmentModel=devmodel))
        d = dh[x]
        sys.stdout.write(str(d) + " ")
        sys.stdout.flush()
        if save_topo:
            with open(save_topo_file, "a+") as tdh:
                tdh.write("[\nnode_num=%d;\nposition=(%s);\ndocker_image=%s;\nequip_model=%s;\nrange=%d;\nmode=%s;\nip=10.0.0.%d;\n]\n" % (x, position, docker_image, devmodel, r, mode, ip))
                tdh.flush()
    container_constraint(dh=dh)
    info("\n** Adding nodes to Mesh\n")
    hlinks = []
    for x in dh:
        hlinks.append(net.addMesh(x, ssid='meshNet'))
        x.verifyingNodes(x)
    ## Mesh routing is not doing a mesh, only 1 to nearby
    # NOTE(review): `meshr` is assigned but the loop below calls the name
    # `meshRouting` instead — presumably a module-level object; confirm
    # which of the two is intended.
    meshr = net.meshRouting("custom")
    info("** Routing nodes through mesh\n")
    for x in dh:
        meshRouting.customMeshRouting(x, 0, net.wifiNodes)
        sys.stdout.write(str(x) + " ")
        sys.stdout.flush()
    start_network(net)
    seed_node = "10.0.0.254:5001"  ## seed will be always the first container!
    config_nodes(seed_node, dh)
    # This will print the topology and network configuration of the
    # random created network
    printTopo(net, num=num, plotgrp=plot, ping=ping)
    # The simulation selected will start:
    if not xp:
        info("> Not running automatic experiment!\n")
    elif xp == "None":
        info("> Not running automatic experiment\n")
    elif xp == 'dataset':
        exp_dataset(net)
    else:
        #TODO: for now there is only one!!!!!
        experiment1(net, dh, num=num, sleep_test=sleep_test, new_met=new_met)
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
def tfTopo():
    """Build a 9-switch / 9-host topology with three Click-based firewall
    containers (h3-h5) on dual-homed links, start firewalls and a simple
    HTTP server on h2, then drop to the CLI."""
    net = Containernet(topo=None, controller=RemoteController, switch=OVSKernelSwitch)
    net.addController('c0', RemoteController, ip="127.0.0.1", port=6633)
    # Hosts
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    # h3-h5 are resource-limited Docker containers running the Click router
    # image (they act as the firewalls started below).
    h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03', cls=Docker, dimage='gmiotto/click', mem_limit=1024 * 1024 * 10, cpu_shares=2)
    h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04', cls=Docker, dimage='gmiotto/click', mem_limit=1024 * 1024 * 10, cpu_shares=10)
    h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05', cls=Docker, dimage='gmiotto/click', mem_limit=1024 * 1024 * 10, cpu_shares=10)
    h6 = net.addHost('h6', ip='10.0.0.6', mac='00:00:00:00:00:06')
    h7 = net.addHost('h7', ip='10.0.0.7', mac='00:00:00:00:00:07')
    h8 = net.addHost('h8', ip='10.0.0.8', mac='00:00:00:00:00:08')
    h9 = net.addHost('h9', ip='10.0.0.9', mac='00:00:00:00:00:09')
    #Switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')
    s6 = net.addSwitch('s6')
    s7 = net.addSwitch('s7')
    s8 = net.addSwitch('s8')
    s9 = net.addSwitch('s9')
    # Each firewall container gets two links to its switch (in/out paths).
    net.addLink(h3, s3)
    net.addLink(h3, s3)
    net.addLink(h4, s4)
    net.addLink(h4, s4)
    net.addLink(h5, s5)
    net.addLink(h5, s5)
    net.addLink(s1, s6)
    net.addLink(s1, s7)
    #net.addLink(s6, s3, cls=TCLink, delay="100ms", bw=0.5, loss=0)
    net.addLink(s6, s3)
    net.addLink(s6, s4, cls=TCLink, delay="1ms", bw=2, loss=0)
    #net.addLink(s6,s4)
    net.addLink(s6, s5)
    net.addLink(s7, s3)
    net.addLink(s7, s5)
    net.addLink(s3, s8)
    net.addLink(s3, s9)
    net.addLink(s4, s8, cls=TCLink, delay="1ms", bw=2, loss=0)
    net.addLink(s4, s9)
    net.addLink(s5, s9)
    net.addLink(s8, s2)
    net.addLink(s9, s2)
    net.addLink(h1, s1)
    net.addLink(h2, s2)
    net.addLink(h6, s6)
    net.addLink(h7, s7)
    net.addLink(h8, s8)
    net.addLink(h9, s9)
    net.start()
    # Disable TCP segmentation offload on every host interface so the
    # Click elements see real packet sizes.
    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
    call("sudo bash Click/runFirewall.sh h3 Click/firewall3.click ", shell=True)
    call("sudo bash Click/runFirewall.sh h4 Click/firewall3.click ", shell=True)
    call("sudo bash Click/runFirewall.sh h5 Click/firewall3.click ", shell=True)
    h2.cmd('python -m SimpleHTTPServer 80 &')
    CLI(net)
    net.stop()
def emulate():
    """Create a network with some docker containers acting as hosts.

    Demonstrates the main Containernet features: Docker hosts with CPU
    limits and volumes, a second interface on a container, the extended
    ping helpers, and adding a container at runtime.
    """
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController('c0')
    info('*** Adding hosts\n')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
    # A container with more specific params: cpu period and cpu quota
    d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty", cpu_period=50000, cpu_quota=25000)
    # Add a container as a host, using Docker class option.
    d3 = net.addHost('d3', ip='11.0.0.253', cls=Docker, dimage="ubuntu:trusty", cpu_shares=20)
    # Add a container with a specific volume.
    d5 = net.addDocker('d5', dimage="ubuntu:trusty", volumes=["/:/mnt/vol1:rw"])
    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2', cls=OVSSwitch)
    s3 = net.addSwitch('s3')
    info('*** Creating links\n')
    net.addLink(h1, s1)
    net.addLink(s1, d1)
    net.addLink(h2, s2)
    net.addLink(d2, s2)
    net.addLink(s1, s2)
    # try to add a second interface to a docker container
    net.addLink(d2, s3, params1={"ip": "11.0.0.254/8"})
    net.addLink(d3, s3)
    info('*** Starting network\n')
    net.start()
    # The typical ping example, with two docker instances in place of hosts.
    net.ping([d1, d2])
    # our extended ping functionality
    net.ping([d1], manualdestip="10.0.0.252")
    net.ping([d2, d3], manualdestip="11.0.0.254")
    info('*** Dynamically add a container at runtime\n')
    d4 = net.addDocker('d4', dimage="ubuntu:trusty")
    # we have to specify a manual ip when we add a link at runtime
    net.addLink(d4, s1, params1={"ip": "10.0.0.254/8"})
    # Ping docker instance d1.
    net.ping([d1], manualdestip="10.0.0.254")
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
class CNAdapter:
    """Adapter that realises a 'sandbox' AS/router/switch/host model as a
    Containernet: routers and end hosts become Docker containers, switches
    become OVS bridges, and Quagga (zebra/bgpd) is configured at runtime.

    NOTE(review): the mapping tables below are *class* attributes, so they
    are shared across CNAdapter instances — fine for a single adapter,
    surprising for several; confirm this is intentional.
    """
    # create containernet instance
    # net = Containernet(controller=Controller)
    net = Containernet()
    # add a default sdn controller for OVS switches to switch end host traffic
    # net.addController('c0')
    #########################
    # router-related vars
    #########################
    sandbox_routers: List['Router'] = []
    sb_router_to_container: Dict['Router', 'Docker'] = {}
    # track how many interfaces of a sandbox router has been processed
    # we need this index to build the interface name
    sb_router_to_container_if_index: Dict['Router', int] = {}
    # we map 'logical' interface obj to the absolute interface name in a container
    # we can use it to build absolute interface name to IPv4Interface string mapping
    # this way, we can build bgpd.conf and zebra.conf for each router container
    sb_router_if_to_container_if_name: Dict['NetworkInterface', str] = {}
    #########################
    # switch-related vars
    #########################
    sandbox_switches: List['Switch'] = []
    # as for now, we do not use Docker containers to implement switches/bridges
    # unless there is an absolute perf-related issue, we will continue to not use Docker for switches/bridges
    sb_switch_to_OVSBridge: Dict['Switch', 'OVSBridge'] = {}
    sb_switch_to_OVSBridge_index: Dict['Switch', int] = {}
    sb_switch_if_to_OVSBridge_if_name: Dict['NetworkInterface', str] = {}
    #########################
    # end-host-related vars
    #########################
    sandbox_end_hosts: List['Host'] = []
    sb_host_to_container: Dict['Host', 'Docker'] = {}
    sb_host_to_container_index: Dict['Host', int] = {}
    sb_host_if_to_container_if_name: Dict['NetworkInterface', str] = {}

    def __init__(self, as_list: List['AutonomousSystem']):
        """Build the whole emulated network from *as_list*, start it, and
        apply runtime (Quagga + IP) configuration.

        Note: only the FIRST router of each AS is used (routers[0]).
        """
        #########################
        # router-router linking
        #########################
        # get all routers from all sandbox ASes
        self.sandbox_routers = [router for router in [as_obj.routers[0] for as_obj in as_list]]
        # prepare boring router docker containers
        self.sb_router_to_container = {}
        for sandbox_router in self.sandbox_routers:
            container = self.net.addDocker(sandbox_router.get_node_id(), ip="", dimage=sandbox_router.docker_image, cap_add=sandbox_router.docker_caps)
            # add the sandbox router to its container mapping
            self.sb_router_to_container[sandbox_router] = container
        # iterate through each sandbox router, link containers
        for local_router in self.sandbox_routers:
            local_router_interfaces = local_router.router_to_router_net_interfaces
            for local_interface in local_router_interfaces:
                # get remote router info
                remote_interface = local_interface.get_paired_interface()
                remote_router = remote_interface.get_owner_node()
                # check if each router is already in sandbox_router_to_container_interface_index
                if local_router not in self.sb_router_to_container_if_index:
                    self.sb_router_to_container_if_index[local_router] = 0
                if remote_router not in self.sb_router_to_container_if_index:
                    self.sb_router_to_container_if_index[remote_router] = 0
                # get each router's current container interface index
                local_router_int_i = self.sb_router_to_container_if_index[local_router]
                remote_router_int_i = self.sb_router_to_container_if_index[remote_router]
                # generate the container interface name for each router
                local_router_int_name = CNAdapter.get_interface_name_in_containernet(local_router.get_node_id(), local_router_int_i)
                remote_router_int_name = CNAdapter.get_interface_name_in_containernet(remote_router.get_node_id(), remote_router_int_i)
                # save the sandbox network interface to container interface name mapping
                # (each link is visited from both ends; only the first visit
                # creates the mapping and the mininet link)
                if (local_interface not in self.sb_router_if_to_container_if_name) and (remote_interface not in self.sb_router_if_to_container_if_name):
                    self.sb_router_if_to_container_if_name[local_interface] = local_router_int_name
                    self.sb_router_if_to_container_if_name[remote_interface] = remote_router_int_name
                    # now we can safely link two router containers together
                    local_router_container = self.sb_router_to_container[local_router]
                    remote_router_container = self.sb_router_to_container[remote_router]
                    self.net.addLink(local_router_container, remote_router_container)
                    # increment container interface index for both routers
                    self.sb_router_to_container_if_index[local_router] += 1
                    self.sb_router_to_container_if_index[remote_router] += 1
                else:
                    print('redundant interface mapping process, skip')
                    pass
        for network_if, container_if in self.sb_router_if_to_container_if_name.items():
            print("sb ip if: {}, container if: {}".format(network_if.get_ip_interface(), container_if))
        # TODO: we create end host boxes and attach them to the corresponding routers via OVSBridges
        #########################
        # switch-router linking
        #########################
        # populate the sandbox switch list
        for r in self.sandbox_routers:
            r_to_s_ifs = r.router_to_switch_net_interfaces
            for r_to_s_if in r_to_s_ifs:
                self.sandbox_switches.append(r_to_s_if.get_paired_interface().get_owner_node())
        # create ovs bridges in mininet
        # and link them to routers in mininet
        for s in self.sandbox_switches:
            ovs_br = self.net.addSwitch(name=s.get_node_id(), cls=OVSBridge)
            self.sb_switch_to_OVSBridge[s] = ovs_br
            # find the linked sandbox router
            linked_sb_router = None
            linked_sb_router_if = None
            switch_net_if_linked_to_router = None
            for n_if in s.net_interfaces:
                paired_if = n_if.get_paired_interface()
                paired_node = paired_if.get_owner_node()
                if isinstance(paired_node, Router):
                    linked_sb_router = paired_node
                    linked_sb_router_if = paired_if
                    switch_net_if_linked_to_router = n_if
                    break
            assert linked_sb_router is not None
            # retrieve the corresponding router container
            router_container = self.sb_router_to_container[linked_sb_router]
            # link the ovs bridge and router container
            self.net.addLink(ovs_br, router_container)
            # first let's check if s has an index; create one if not
            if s not in self.sb_switch_to_OVSBridge_index:
                self.sb_switch_to_OVSBridge_index[s] = 0
            # bind net_if with its actual eth str name
            self.sb_switch_if_to_OVSBridge_if_name[switch_net_if_linked_to_router] = \
                CNAdapter.get_interface_name_in_containernet(
                    s.get_node_id(),
                    self.sb_switch_to_OVSBridge_index[s]
                )
            self.sb_router_if_to_container_if_name[linked_sb_router_if] = \
                CNAdapter.get_interface_name_in_containernet(
                    linked_sb_router.get_node_id(),
                    self.sb_router_to_container_if_index[linked_sb_router]
                )
            # increment if indexes
            self.sb_switch_to_OVSBridge_index[s] += 1
            self.sb_router_to_container_if_index[linked_sb_router] += 1
        #########################
        # host-switch linking
        #########################
        # populate the end host list
        for s in self.sandbox_switches:
            for sif in s.net_interfaces:
                n = sif.get_paired_interface().get_owner_node()
                if isinstance(n, Host):
                    self.sandbox_end_hosts.append(n)
        # create end host containers and
        # link them to the corresponding switches (ovs bridges)
        for h in self.sandbox_end_hosts:
            end_host_container = self.net.addDocker(h.get_node_id(), ip="", dimage=h.docker_image, cap_add=h.docker_caps)
            self.sb_host_to_container[h] = end_host_container
            # get the connected sb switch
            # TODO: we assume each end host has only one net_interface which is directly connected to the switch
            sb_switch = h.net_interfaces[0].get_paired_interface().get_owner_node()
            assert type(sb_switch) is Switch
            # get the OVS bridge
            ovs_bridge = self.sb_switch_to_OVSBridge[sb_switch]
            # link ovs_bridge with the end host container
            self.net.addLink(end_host_container, ovs_bridge)
            # create an index for the host
            if h not in self.sb_host_to_container_index:
                self.sb_host_to_container_index[h] = 0
            # bind net_if to actual eth name in containernet
            self.sb_host_if_to_container_if_name[h.net_interfaces[0]] = CNAdapter.get_interface_name_in_containernet(h.get_node_id(), self.sb_host_to_container_index[h])
            self.sb_switch_if_to_OVSBridge_if_name[h.net_interfaces[0].get_paired_interface()] = \
                CNAdapter.get_interface_name_in_containernet(
                    sb_switch.get_node_id(),
                    self.sb_switch_to_OVSBridge_index[sb_switch]
                )
            # increment indexes
            self.sb_host_to_container_index[h] += 1
            self.sb_switch_to_OVSBridge_index[sb_switch] += 1
        # start net
        self.net.start()
        # config runtime
        self.runtime_config()

    #def __del__(self):
    #    self.net.stop()

    def runtime_config(self):
        """Configure the running containers: generate and install Quagga
        (zebra/bgpd) configs on every router, then assign addresses and
        default routes on every end host."""
        #########################
        # router config
        #########################
        # create a Quagga config generator
        qcg: 'QuaggaConfigGenerator' = QuaggaConfigGenerator()
        # go through each router container and create zebra and bgpd config files
        for r, c in self.sb_router_to_container.items():
            net_ifs = []
            neighbors = []  # neighbor: {"ip": str, "asn": int}
            networks = [p.with_prefixlen for p in r.owner_as.prefixes]
            for net_if in r.router_to_router_net_interfaces:
                net_if_dict = {
                    'name': self.sb_router_if_to_container_if_name[net_if],
                    'ip': net_if.get_ip_interface().ip.compressed,
                    'prefix_len': net_if.get_ip_interface().network.prefixlen
                }
                net_ifs.append(net_if_dict)
                # now build neighbor
                remote_if = net_if.get_paired_interface()
                # such jump much wow
                remote_if_ip = remote_if.get_ip_interface().ip.compressed
                remote_if_asn = remote_if.get_owner_node().owner_as.asn
                neighbor = {"ip": remote_if_ip, "asn": remote_if_asn}
                neighbors.append(neighbor)
            # turn off default nat interface provided by docker
            c.cmd("ifconfig eth0 0")
            # install zebra config
            zebra_conf_str = qcg.generate_zebra_config(r.node_id, net_ifs)
            cmd_install_zebra_conf = 'echo "{}" > /etc/quagga/zebra.conf'.format(zebra_conf_str)
            c.cmd(cmd_install_zebra_conf)
            c.waitOutput()
            # run zebra!
            c.cmd("zebra -f /etc/quagga/zebra.conf -d -i /tmp/zebra.pid -z /tmp/zebra.sock")
            # install bgpd config
            bgpd_config_str = qcg.generate_bgpd_config(node_id=r.node_id, asn=r.owner_as.asn, networks=networks, neighbors=neighbors)
            cmd_install_bgpd_conf = 'echo "{}" > /etc/quagga/bgpd.conf'.format(bgpd_config_str)
            c.cmd(cmd_install_bgpd_conf)
            c.waitOutput()
            # run bgpd!
            c.cmd("bgpd -f /etc/quagga/bgpd.conf -d -i /tmp/bgpd.pid -z /tmp/zebra.sock")
            # now we set up the ips for end-host-facing interfaces
            for net_if in r.router_to_switch_net_interfaces:
                # we only run ifconfig if the interface has been created
                if net_if in self.sb_router_if_to_container_if_name:
                    ip_if = net_if.get_ip_interface()
                    eth_name = self.sb_router_if_to_container_if_name[net_if]
                    c.cmd("ip addr add {} dev {}".format(ip_if.with_prefixlen, eth_name))
        #########################
        # end host config
        #########################
        for net_if, eth_name in self.sb_host_if_to_container_if_name.items():
            sb_host = net_if.get_owner_node()
            host_container = self.sb_host_to_container[sb_host]
            # turn off default eth provided by docker
            host_container.cmd("ifconfig eth0 0")
            # add host ip to the right eth
            host_container.cmd("ip addr add {} dev {}".format(net_if.get_ip_interface().with_prefixlen, eth_name))
            # set the default gateway for the end host
            # the gateway ip is end host's ip interface's network's second IP
            # e.g., an end host ip_interface: 1.1.1.200/25, the network is 1.1.1.128/25,
            # the second ip of the network is 1.1.1.129.
            # therefore, the gateway for the end host is 1.1.1.129
            host_container.cmd("ip route add default via {}".format(net_if.get_ip_interface().network[1]))

    @staticmethod
    def get_interface_name_in_containernet(node_id: str, container_interface_index: int) -> str:
        """Return the containernet interface name '<node_id>-eth<index>'."""
        interface_template = "{}-eth{}"
        return interface_template.format(node_id, container_interface_index)
def topology():
    """Build a BIRD BGP playground: five BIRD router containers, four
    Ubuntu host containers, and two OVS 'SDN router' switches; install the
    per-router BIRD configs via `docker cp`, start bird, and drop to the
    CLI."""
    net = Containernet(controller=RemoteController)
    net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=8080)
    info('***** Adding BIRD Routers *****\n')
    r1 = net.addDocker('r1', ip='10.10.1.1/24', dimage='bird:tanmay')
    r2 = net.addDocker('r2', ip='10.11.1.1/24', dimage='bird:tanmay')
    r3 = net.addDocker('r3', ip='10.20.1.1/24', dimage='bird:tanmay')
    r4 = net.addDocker('r4', ip='10.21.1.1/24', dimage='bird:tanmay')
    r5 = net.addDocker('r5', ip='192.168.99.2/24', dimage='bird:tanmay')
    info('***** Adding Hosts *****\n')
    # Each host's default route points at its directly attached router.
    h1 = net.addDocker('h1', ip='10.10.1.2/24', defaultRoute='via 10.10.1.1', dimage='ubuntu:focal')
    h2 = net.addDocker('h2', ip='10.11.1.2/24', defaultRoute='via 10.11.1.1', dimage='ubuntu:focal')
    h3 = net.addDocker('h3', ip='10.20.1.2/24', defaultRoute='via 10.20.1.1', dimage='ubuntu:focal')
    h4 = net.addDocker('h4', ip='10.21.1.2/24', defaultRoute='via 10.21.1.1', dimage='ubuntu:focal')
    info('***** Adding Switches *****\n')
    sdn_r1 = net.addSwitch('sdn_r1')
    sdn_r2 = net.addSwitch('sdn_r2')
    info('***** Creating Links *****\n')
    net.addLink(h1, r1)
    net.addLink(h2, r2)
    net.addLink(h3, r3)
    net.addLink(h4, r4)
    # params1 sets the IP on the router-side interface of each new link.
    net.addLink(r1, sdn_r1, params1={'ip': '192.168.101.2/24'})
    net.addLink(r2, sdn_r1, params1={'ip': '192.168.102.2/24'})
    net.addLink(r3, sdn_r2, params1={'ip': '192.168.201.2/24'})
    net.addLink(r4, sdn_r2, params1={'ip': '192.168.202.2/24'})
    net.addLink(sdn_r1, r5)
    net.addLink(r5, sdn_r2, params1={'ip': '192.168.98.2/24'})
    net.addLink(sdn_r1, sdn_r2)
    info('***** Starting network *****\n')
    net.start()
    #Adding management interface on router sdn 1
    cmd1 = 'sudo ip addr add 192.168.101.1/24 dev sdn_r1'
    cmd2 = 'sudo ip addr add 192.168.102.1/24 dev sdn_r1'
    cmd3 = 'sudo ip addr add 192.168.99.1/24 dev sdn_r1'
    cmd4 = 'sudo ip link set sdn_r1 up'
    os.system(cmd1)
    os.system(cmd2)
    os.system(cmd3)
    os.system(cmd4)
    # Disable in-band control so controller traffic does not leak onto the
    # data plane.
    cmd5 = 'sudo ovs-vsctl set bridge sdn_r1 other-config:disable-in-band=true'
    os.system(cmd5)
    #Adding management interface on router sdn 2
    cmd1 = 'sudo ip addr add 192.168.201.1/24 dev sdn_r2'
    cmd2 = 'sudo ip addr add 192.168.202.1/24 dev sdn_r2'
    cmd3 = 'sudo ip addr add 192.168.98.1/24 dev sdn_r2'
    cmd4 = 'sudo ip link set sdn_r2 up'
    os.system(cmd1)
    os.system(cmd2)
    os.system(cmd3)
    os.system(cmd4)
    cmd5 = 'sudo ovs-vsctl set bridge sdn_r2 other-config:disable-in-band=true'
    os.system(cmd5)
    #copying BIRD configs in respective dockers
    os.system('sudo docker cp r1.conf mn.r1:/etc/bird.conf')
    os.system('sudo docker cp r2.conf mn.r2:/etc/bird.conf')
    os.system('sudo docker cp r3.conf mn.r3:/etc/bird.conf')
    os.system('sudo docker cp r4.conf mn.r4:/etc/bird.conf')
    os.system('sudo docker cp r5.conf mn.r5:/etc/bird.conf')
    r1.cmd('bird -c /etc/bird.conf')
    r2.cmd('bird -c /etc/bird.conf')
    r3.cmd('bird -c /etc/bird.conf')
    r4.cmd('bird -c /etc/bird.conf')
    r5.cmd('bird -c /etc/bird.conf')
    info('***** Running CLI *****\n')
    CLI(net)
    info('***** Stopping network *****\n')
    net.stop()
from mininet.cli import CLI
from mininet.net import Containernet
from mininet.log import info, setLogLevel
from mininet.link import TCLink
# BUG FIX: OVSSwitch and RemoteController are used below but were never
# imported in this script, which raises NameError at Containernet(...).
from mininet.node import OVSSwitch, RemoteController
import time
import os

# Topology skeleton: three hosts and one OpenFlow 1.3 switch managed by two
# remote controllers on localhost (ports 6653/6654), with TC-shaped links.
setLogLevel('info')
info('*** Create the controller \n')
#info(c0)
"Create Simple topology example."
net = Containernet(switch = OVSSwitch, build=False, link=TCLink)
net.addController('c0', controller = RemoteController, ip = "127.0.0.1", port = 6653)
net.addController('c1', controller = RemoteController, ip = "127.0.0.1", port = 6654)
# Initialize topology
# Add containers
h1 = net.addHost('h1', ip='10.0.0.251')  # Client
h2 = net.addHost('h2', ip='10.0.0.252')  # Attacker
h3 = net.addHost('h3', ip='10.0.0.253')  # Victim
# Add switches
info('*** Adding switches\n')
sw1 = net.addSwitch('sw1', protocols='OpenFlow13')
# Add links
info('*** Creating links\n')
# NOTE(review): the leading triple-quote below looks like a stray or
# unterminated string delimiter in the original source (no matching closer
# is visible here); left untouched — confirm intent against the original.
'''
from mininet.node import OVSSwitch, Controller, RemoteController
from mininet.cli import CLI
from mininet.net import Containernet
from mininet.log import info, setLogLevel
import os

setLogLevel('info')
info('*** Create the controller \n')
c0 = RemoteController('c0', port=6653)
info(c0)
"Create Simple topology example."
net = Containernet(build=False)
# Initialize topology
# Add containers
info('*** Adding docker containers using openswitch/ubuntuscapy images\n')
h1 = net.addDocker('h1', ip='10.0.0.251', dimage="openswitch/ubuntuscapy")
h2 = net.addDocker('h2', ip='10.0.0.252', dimage="openswitch/ubuntuscapy")
# Add switches
info('*** Adding switches\n')
s1 = net.addSwitch('s1')
# Add links
info('*** Creating links\n')
net.addLink(h1, s1)
net.addLink(h2, s1)
def topology():
    """Create a network with some docker containers acting as hosts.

    Six plain hosts (h1 is the designated victim), three 'mit/filter'
    containers attached at switches s1/s2/s5, and a six-switch fabric,
    controlled by a RemoteController; drops to the CLI when built.
    """
    net = Containernet(controller=RemoteController)
    info('*** Adding controller\n')
    net.addController('c0')
    info('*** Adding hosts\n')
    Victim = net.addHost( 'h1', mac='00:00:00:00:00:01')
    Host2 = net.addHost( 'h2', mac='00:00:00:00:00:02' )
    Host3 = net.addHost( 'h3', mac='00:00:00:00:00:03' )
    Host4 = net.addHost( 'h4', mac='00:00:00:00:00:04' )
    Host5 = net.addHost( 'h5', mac='00:00:00:00:00:05' )
    Host6 = net.addHost( 'h6', mac='00:00:00:00:00:06' )
    #info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', mac='00:00:00:00:00:11', dimage="mit/filter:latest")
    d2 = net.addDocker('d2', ip='10.0.0.252', mac='00:00:00:00:00:12', dimage="mit/filter:latest")
    d5 = net.addDocker('d5', ip='10.0.0.253', mac='00:00:00:00:00:15', dimage="mit/filter:latest")
    info('*** Adding switch\n')
    Switch1 = net.addSwitch( 's1' )
    Switch2 = net.addSwitch( 's2' )
    Switch3 = net.addSwitch( 's3' )
    Switch4 = net.addSwitch( 's4' )
    Switch5 = net.addSwitch( 's5' )
    Switch6 = net.addSwitch( 's6' )
    info('*** Creating links\n')
    net.addLink( Switch1, Switch3 )
    net.addLink( Switch3, Switch5 )
    net.addLink( Switch3, Switch6 )
    net.addLink( Switch5, Host2 )
    net.addLink( Switch6, Victim )
    net.addLink( Switch6, Switch4 )
    net.addLink( Switch4, Switch2)
    net.addLink( Host3, Switch1 )
    net.addLink( Host4, Switch1 )
    net.addLink( Host5, Switch2 )
    net.addLink( Host6, Switch2 )
    net.addLink(Switch1 , d1 )
    net.addLink(Switch2 , d2 )
    net.addLink(Switch5 , d5 )
    #net.addLink(s1, s2, cls=TCLink, delay="100ms", bw=1, loss=10)
    # try to add a second interface to a docker container
    #net.addLink(d2, s3, params1={"ip": "11.0.0.254/8"})
    #net.addLink(d3, s3)
    info('*** Starting network\n')
    net.start()
    #net.ping([d1, d2])
    # our extended ping functionality
    #net.ping([d1], manualdestip="10.0.0.252")
    #net.ping([d2, d3], manualdestip="11.0.0.254")
    #info('*** Dynamically add a container at runtime\n')
    #d4 = net.addDocker('d4', dimage="ubuntu:trusty")
    # we have to specify a manual ip when we add a link at runtime
    #net.addLink(d4, s1, params1={"ip": "10.0.0.254/8"})
    # other options to do this
    #d4.defaultIntf().ifconfig("10.0.0.254 up")
    #d4.setIP("10.0.0.254")
    #net.ping([d1], manualdestip="10.0.0.254")
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
#!/usr/bin/python from mininet.net import Containernet from mininet.node import Controller, Node from mininet.cli import CLI from mininet.link import TCLink from mininet.log import info, setLogLevel from mininet.bmv2 import Bmv2Switch, P4DockerHost setLogLevel('info') net = Containernet(controller=Controller, switch=Bmv2Switch) info('*** Adding controller\n') net.addController('c0') info('*** Adding docker containers\n') d1 = net.addDocker('hss', cls=P4DockerHost, ip='192.168.61.2', dimage='ubuntu_iperf:1804', mac='00:00:00:00:00:E2') d2 = net.addDocker('mme', cls=P4DockerHost, ip='192.168.61.3', dimage='ubuntu_iperf:1804', mac='00:00:00:00:00:E3') info('*** Adding switches\n') s1 = net.addSwitch('s1', json='./basic.json',
def myNetwork():
    """Attacker / victim / heartbleed-server lab topology.

    Attacker and victim containers share switch s1 (subnet
    192.168.16.0/24); a vulnerable Apache server sits alone on s2
    (192.168.17.0/24).  Each subnet gets its own NAT gateway for
    Internet access.  Ends in the CLI, then stops the network.
    """
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController(name='c0')

    info('*** Add switches\n')
    s1 = net.addSwitch('s1', cls=OVSSwitch)
    s2 = net.addSwitch('s2', cls=OVSSwitch)

    info('*** Add hosts\n')
    # Attacker host
    # network_mode "none": Containernet wires the interfaces itself.
    mn_args = {
        "network_mode": "none",
        "dimage": "cristitech/attacker",
        "dcmd": None,
        "ip": "192.168.16.108/24",
    }
    H1 = net.addDocker('attacker', **mn_args)
    # Victim host
    mn_args = {
        "network_mode": "none",
        "dimage": "cristitech/victim",
        #"dcmd": "./start_app.sh",
        "ip": "192.168.16.109/24",
    }
    H2 = net.addDocker('victim', **mn_args)
    # Vulnerable server (separate /24 behind s2)
    mn_args = {
        "network_mode": "none",
        "dimage": "cristitech/docker-heartbleed",
        "dcmd": None,
        "ip": "192.168.17.110/24",
    }
    H3 = net.addDocker('server', **mn_args)
    #cristitech/docker-heartbleed

    info('*** Add links\n')
    net.addLink(H1, s1)
    net.addLink(H2, s1)
    net.addLink(H3, s2)

    info('*** Add Internet access\n')
    # One NAT gateway per subnet, running in the root namespace.
    mn_args = {
        "ip": "192.168.16.1/24",
    }
    nat = net.addHost('nat0', cls=NAT, inNamespace=False,
                      subnet='192.168.16.0/24', **mn_args)
    # Connect the nat to the switch
    net.addLink(nat, s1)
    mn_args = {
        "ip": "192.168.17.1/24"
    }
    nat2 = net.addHost('nat1', cls=NAT, inNamespace=False,
                       subnet='192.168.17.0/24', **mn_args)
    net.addLink(nat2, s2)

    info('*** Starting network\n')
    net.start()
    # Default routes through the per-subnet NAT gateways.
    H1.cmd('ip r a default via 192.168.16.1')
    H2.cmd('ip r a default via 192.168.16.1')
    # shutdown eth0 (the image's own interface) and use the Mininet-managed one
    H3.cmd('ip link set eth0 down')
    # power on server-eth0
    H3.cmd('ip link set server-eth0 up')
    # set default route
    H3.cmd('ip r a default via 192.168.17.1')
    # set apache servername so apache2ctl does not complain
    H3.cmd('echo "ServerName 192.168.17.110" >> /etc/apache2/apache2.conf')
    # restart apache
    H3.cmd('apache2ctl restart')
    CLI(net)
    net.stop()
def myNetwork():
    """Two application containers plus a Snort IDS on one switch, with NAT.

    After the network starts, port s1-eth3 (the IDS port) is deleted and
    re-added as the output port of an OVS mirror, so the IDS container
    receives a copy of all traffic crossing s1.  Ends in the CLI.
    """
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController(name='c0')

    info('*** Add switches\n')
    s1 = net.addSwitch('s1')

    info('*** Add hosts\n')
    # Two identical application containers (the monitored hosts).
    mn_args = {
        "network_mode": "none",
        "dimage": "exam_docker",
        "dcmd": "./start_app.sh",
        "ip": "192.168.16.2/24",
    }
    H1 = net.addDocker('h1', **mn_args)
    mn_args = {
        "network_mode": "none",
        "dimage": "exam_docker",
        "dcmd": "./start_app.sh",
        "ip": "192.168.16.3/24",
    }
    H2 = net.addDocker('h2', **mn_args)
    # Snort IDS container; receives mirrored traffic via s1-eth3.
    mn_args = {
        "network_mode": "none",
        "dimage": "lab07/snort",
        "dcmd": None,
        "ip": "192.168.16.100/24",
    }
    H3 = net.addDocker('IDS', **mn_args)

    info('*** Add links\n')
    net.addLink(H1, s1)
    net.addLink(H2, s1)
    net.addLink(H3, s1)  # becomes s1-eth3, reused below as mirror output port

    info('*** Add Internet access\n')
    mn_args = {
        "ip": "192.168.16.1/24",
    }
    nat = net.addHost('nat0', cls=NAT, inNamespace=False,
                      subnet='192.168.16.0/24', **mn_args)
    # Connect the nat to the switch
    net.addLink(nat, s1)

    info('*** Starting network\n')
    net.start()
    H1.cmd('ip r a default via 192.168.16.1')
    H2.cmd('ip r a default via 192.168.16.1')

    # port mirroring all from s1 to IDS: recreate s1-eth3 and attach an OVS
    # mirror (select-all) whose output port is s1-eth3.
    # FIX: subprocess.run() leaves .stdout as None unless stdout is captured,
    # so the original printed "None"; capture it explicitly.
    cmd = '''ovs-vsctl del-port s1-eth3'''
    results = subprocess.run(cmd, shell=True, universal_newlines=True,
                             check=True, stdout=subprocess.PIPE).stdout
    print(results)
    cmd = '''ovs-vsctl add-port s1 s1-eth3 -- --id=@p get port s1-eth3 -- --id=@m create mirror name=m0 select-all=true output-port=@p -- set bridge s1 mirrors=@m'''
    results = subprocess.run(cmd, shell=True, universal_newlines=True,
                             check=True, stdout=subprocess.PIPE).stdout
    CLI(net)
    net.stop()
def topoloy():
    """Topologia final sobre ContainerNet.

    `user_leg` usuarios legitimos, `bot` hosts bots infectados, `vict`
    servidores, `ids` IDS snort sniffer.  Builds a core / distribution /
    access OVS fabric controlled by a remote ONOS instance, attaches all
    containers, starts each container's service and drops into the CLI.

    :return: `void`:
    """
    # setLogLevel('info')
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # ############################################
    # Config Virtual Switches
    # ############################################
    cls = OVSSwitch

    # ############################################
    # Config Controller
    # ############################################
    onos = RemoteController('onos', ip=IP_CONTROLLER, port=PORT_CONTROLLER)
    net = Containernet(controller=RemoteController, link=TCLink)
    logger.info("Adding controller")
    # info('*** Adding controller\n')
    net.addController(onos)

    # ############################################
    # Docker containers.
    # ############################################
    logger.info('Adding docker containers')

    # --------------------------------------
    # IDS: Snort 4.x -- one per distribution switch.
    # FIX: the keyword was misspelled `dmcd`; Containernet's Docker host
    # takes `dcmd` (as the sibling topologies in this file use), so the
    # start command was silently dropped into the container kwargs.
    # --------------------------------------
    ids = [
        net.addDocker('ids1', ip='192.168.12.3/16', dimage=DI_SNORT,
                      mem_limit=MEM_IDS, dcmd='./start.sh'),
        net.addDocker('ids2', ip='192.168.13.3/16', dimage=DI_SNORT,
                      mem_limit=MEM_IDS, dcmd='./start.sh'),
        net.addDocker('ids3', ip='192.168.14.3/16', dimage=DI_SNORT,
                      mem_limit=MEM_IDS, dcmd='./start.sh'),
        net.addDocker('ids4', ip='192.168.15.3/16', dimage=DI_SNORT,
                      mem_limit=MEM_IDS, dcmd='./start.sh')
    ]

    # --------------------------------------
    # USER: ubuntu trusty (legitimate clients)
    # --------------------------------------
    users = [
        net.addDocker('user_leg0', ip='192.168.4.10/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg1', ip='192.168.4.11/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg2', ip='192.168.5.12/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg3', ip='192.168.5.13/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg4', ip='192.168.6.14/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg5', ip='192.168.6.15/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg6', ip='192.168.7.16/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg7', ip='192.168.7.17/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg8', ip='192.168.8.18/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg9', ip='192.168.8.19/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg10', ip='192.168.9.71/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD),
        net.addDocker('user_leg11', ip='192.168.9.72/16', dimage=DI_USER,
                      mem_limit=MEM_USER, dcmd=USER_CMD)
    ]

    # --------------------------------------
    # SERVER: Apache 2.4 (attack victims)
    # --------------------------------------
    vict = [
        net.addDocker('vict0', ip='192.168.10.5/16', dimage=DI_SERVER,
                      mem_limit=MEM_SERVER, dcmd='httpd-foreground'),
        net.addDocker('vict1', ip='192.168.11.50/16', dimage=DI_SERVER,
                      mem_limit=MEM_SERVER, dcmd='httpd-foreground'),
        net.addDocker('vict2', ip='192.168.11.51/16', dimage=DI_SERVER,
                      mem_limit=MEM_SERVER, dcmd='httpd-foreground')
    ]

    # --------------------------------------
    # BOT: Ubuntu trusty (infected hosts, no start command)
    # --------------------------------------
    bots = [
        net.addDocker('bot0', ip='192.168.4.20/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot1', ip='192.168.5.21/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot2', ip='192.168.6.22/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot3', ip='192.168.7.23/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot4', ip='192.168.8.24/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot5', ip='192.168.9.25/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot6', ip='192.168.5.26/16', dimage=DI_BOT, mem_limit=MEM_BOT),
        net.addDocker('bot7', ip='192.168.8.27/16', dimage=DI_BOT, mem_limit=MEM_BOT),
    ]

    # ############################################
    # IDS ADD NETS ONOS
    # Attach each IDS container to the controller's out-of-band docker
    # network so Snort can reach ONOS.
    # ############################################
    for ids_name in ('mn.ids1', 'mn.ids2', 'mn.ids3', 'mn.ids4'):
        subprocess.run(
            ['docker', 'network', 'connect', 'envr_control_net', ids_name])

    # ############################################
    # Switches
    # ############################################
    logger.info('Adding switches')
    # Core
    s1 = net.addSwitch('s1', cls=cls, protocols=OF_PROTOL)
    s2 = net.addSwitch('s2', cls=cls, protocols=OF_PROTOL)
    # Border
    s3 = net.addSwitch('s3', cls=cls, protocols=OF_PROTOL)
    # Distribution
    s4 = net.addSwitch('s4', cls=cls, protocols=OF_PROTOL)
    s5 = net.addSwitch('s5', cls=cls, protocols=OF_PROTOL)
    s6 = net.addSwitch('s6', cls=cls, protocols=OF_PROTOL)
    s7 = net.addSwitch('s7', cls=cls, protocols=OF_PROTOL)
    # Service
    s8 = net.addSwitch('s8', cls=cls, protocols=OF_PROTOL)
    s12 = net.addSwitch('s12', cls=cls, protocols=OF_PROTOL)
    # Access
    s9 = net.addSwitch('s9', cls=cls, protocols=OF_PROTOL)
    s10 = net.addSwitch('s10', cls=cls, protocols=OF_PROTOL)
    s11 = net.addSwitch('s11', cls=cls, protocols=OF_PROTOL)
    s13 = net.addSwitch('s13', cls=cls, protocols=OF_PROTOL)
    s14 = net.addSwitch('s14', cls=cls, protocols=OF_PROTOL)
    s15 = net.addSwitch('s15', cls=cls, protocols=OF_PROTOL)

    # ############################################
    # Links (bandwidths per tier, Mbits/s)
    # ############################################
    logger.info('Creating links')
    net.addLink(s3, s1, bw=BW_CORE_TO_CORE)  # BW [Mbits / s]
    net.addLink(s3, s2, bw=BW_CORE_TO_CORE)
    net.addLink(s2, s1, bw=BW_CORE_TO_CORE)  # BW [Mbits / s]
    net.addLink(s5, s4, bw=BW_DISTRIBUTION_TO_DISTRIBUTION)
    net.addLink(s1, s5, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s2, s5, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s1, s4, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s2, s4, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s6, s7, bw=BW_DISTRIBUTION_TO_DISTRIBUTION)
    net.addLink(s1, s7, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s2, s7, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s1, s6, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s2, s6, bw=BW_CORE_TO_DISTRIBUTION)
    net.addLink(s8, s4, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s9, s4, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s10, s4, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s11, s4, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s8, s5, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s9, s5, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s10, s5, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s11, s5, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s12, s6, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s13, s6, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s14, s6, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s15, s6, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s12, s7, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s13, s7, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s14, s7, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(s15, s7, bw=BW_DISTRIBUTION_TO_EDGE)
    net.addLink(vict[0], s8, bw=BW_SERVER)
    net.addLink(vict[1], s12, bw=BW_SERVER)
    net.addLink(vict[2], s12, bw=BW_SERVER)
    # Corregir archivo snort.conf"""
    net.addLink(ids[0], s4, bw=BW_IDS)
    net.addLink(ids[1], s5, bw=BW_IDS)
    net.addLink(ids[2], s6, bw=BW_IDS)
    net.addLink(ids[3], s7, bw=BW_IDS)
    net.addLink(bots[0], s9, bw=BW_CLIENT)
    net.addLink(bots[1], s10, bw=BW_CLIENT)
    net.addLink(bots[2], s11, bw=BW_CLIENT)
    net.addLink(bots[3], s13, bw=BW_CLIENT)
    net.addLink(bots[4], s14, bw=BW_CLIENT)
    net.addLink(bots[5], s15, bw=BW_CLIENT)
    net.addLink(bots[6], s10, bw=BW_CLIENT)
    net.addLink(bots[7], s14, bw=BW_CLIENT)
    net.addLink(users[0], s9, bw=BW_CLIENT)
    net.addLink(users[1], s9, bw=BW_CLIENT)
    net.addLink(users[2], s10, bw=BW_CLIENT)
    net.addLink(users[3], s10, bw=BW_CLIENT)
    net.addLink(users[4], s11, bw=BW_CLIENT)
    net.addLink(users[5], s11, bw=BW_CLIENT)
    net.addLink(users[6], s13, bw=BW_CLIENT)
    net.addLink(users[7], s13, bw=BW_CLIENT)
    net.addLink(users[8], s14, bw=BW_CLIENT)
    net.addLink(users[9], s14, bw=BW_CLIENT)
    net.addLink(users[10], s15, bw=BW_CLIENT)
    net.addLink(users[11], s15, bw=BW_CLIENT)

    # ############################################
    # Containers Commands
    # Every container gets an explicit 192.168/16 route out of its
    # Mininet-managed interface.  (Idiomatic enumerate instead of
    # range(x.__len__()).)
    # ############################################
    for i, u in enumerate(users):
        u.cmd('ip route add 192.168.0.0/16 dev user_leg' + str(i) + '-eth0')
    for i, b in enumerate(bots):
        b.cmd('ip route add 192.168.0.0/16 dev bot' + str(i) + '-eth0')
    for i, s in enumerate(ids):
        # ids containers are named ids1..ids4, hence i + 1
        s.cmd('ip route add 192.168.0.0/16 dev ids' + str(i + 1) + '-eth0')
    for i, v in enumerate(vict):
        v.cmd('ip route add 192.168.0.0/16 dev vict' + str(i) + '-eth0')

    # ############################################
    # START NETWORK
    # ############################################
    logger.info('Starting network')
    net.start()
    # Run httpd-foreground command
    for v in vict:
        v.start()
    # Run Snort Sniffer
    for s in ids:
        s.start()
    logger.info('Testing connectivity')
    net.pingAll()
    # Run init Cmd
    for u in users:
        u.start()
    logger.info('Running CLI')
    CLI(net)

    # ############################################
    # END
    # ############################################
    logger.info('Stopping network')
    net.stop()
def topology(num=100):
    """Build `num` Docker stations in a wireless mesh and run a SERF
    publish/monitor simulation over them.

    NOTE(review): the unconditional sys.exit() right after prepareGraph()
    aborts the whole run -- everything below it is currently dead code,
    presumably left in for graph-only experiments; confirm before relying
    on the rest of this function.
    """
    info("Creating a network with docker containers acting as hosts and wireless mesh network environment.\n")
    net = Containernet(controller=Controller)

    info('*** Adding docker containers\n')
    ## In automated way will create Hosts
    dh = []
    listPos = positions(num)
    prepareGraph(num)
    sys.exit()  # NOTE(review): terminates here -- see docstring
    print("** Creating %d Station(s) " % (num))
    for x in range(0, num):
        # IPs count down from 10.0.0.254
        ip = (254 - x)
        posx, posy, posz, r = getPosition(listPos)
        ## This will create the hosts with image ubuntu:trusty, position and range of the device
        ## other information can be added later
        dh.append(
            net.addDocker('d' + str(x), cls=Docker, ip='10.0.0.' + str(ip),
                          dimage="ubuntu:trusty",
                          position=str(posx) + ',' + str(posy) + ',' + str(posz),
                          range=r))
        d = dh[x]
        sys.stdout.write(str(d) + " ")
        sys.stdout.flush()

    info("\n** Adding nodes to Mesh\n")
    for x in dh:
        net.addMesh(x, ssid='meshNet')
    #pdb.set_trace()
    # NOTE(review): meshr is assigned but never used afterwards.
    meshr = net.meshRouting("mesh")
    info("** Routing nodes through mesh\n")
    for x in dh:
        meshRouting.customMeshRouting(x, 0, net.wifiNodes)
        sys.stdout.write(str(x) + " ")
        sys.stdout.flush()

    info('\n*** Starting network\n')
    net.build()
    ## Build should do the same as start but it will interconnect hosts
    #net.start()

    seed_node = "10.0.0.254:5001"
    ## seed will be always the first container!
    info("** Configuring node(s)\n")
    for d in dh:
        sys.stdout.write(str(d) + " - ")
        sys.stdout.flush()
        port = 5001
        dev = str(d) + "-mp0"
        ## Calling inside script to configure each container
        nn = d.cmd(
            "/bin/bash /home/config-serf.sh config rpc=127.0.0.1:7373 port="
            + str(port) + " dev=" + str(dev) + " seed=" + str(seed_node))
        if nn:
            # NOTE(review): `ip` here is the stale value from the creation
            # loop above (always the last station's IP), not this node's --
            # looks like a latent bug in the log message; verify.
            print("Configured for 10.0.0.%s:%d in device %s " % (str(ip), port, dev))
            print("> %s " % (nn))
        else:
            print("Error in configuring %s " % (str(d)))

    info('*** Starting our simulation\n')
    # Simulation length scales with the station count.
    slpv = 10 * num / 4  ### SLEEP VARIABLE!!!!
    #This will start our simulation of SERF+monitor
    #Calling our inside script to start the processes
    st = time.time()
    with open('/home/simul.' + str(num) + '-' + str(int(time.time())) + '.txt',
              "a+") as myfile:
        for d in range(0, num):
            dh[d].sendCmd("/bin/bash /home/config-serf.sh test " + str(slpv)
                          + " 2>&1 /dev/null &")
            see_pub(dh, out=myfile)
            #dh[d+1].cmd("/bin/bash /home/config-serf.sh test 50")
            time.sleep(1)  ## So that each will start after each other
        info("** All nodes have published services.\n")
        info("** waiting for them to end.")
        et = 0
        # Poll published services until the simulation window elapses.
        while ((st + slpv) - et) >= 0:  ## while within the time frame
            #for i in range(0,slpv):
            sys.stdout.write(".")
            sys.stdout.flush()
            see_pub(dh, out=myfile)
            #time.sleep(1)
            et = time.time()
        sys.stdout.flush()
        myfile.flush()
    print("\n")
    printTopo(net, num=num)
    info("*** Simulation has ended (?)\n")

    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
#!/usr/bin/python """ This is the most simple example to showcase Containernet. """ from mininet.net import Containernet from mininet.node import POX, RemoteController from mininet.cli import CLI from mininet.link import TCLink from mininet.log import info, setLogLevel setLogLevel('info') #net = Containernet(controller=POX) net = Containernet() info('*** Adding controller\n') #net.addController('c0', poxArgs = 'forwarding.droplist') net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=6653) info('*** Adding docker containers\n') d1 = net.addDocker('d1', ip='10.0.0.251', dimage="tcpebpf") d2 = net.addDocker('d2', ip='10.0.0.252', dimage="tcpebpf") info('*** Adding switches\n') s1 = net.addSwitch('s1') info('*** Creating links\n') net.addLink(d1, s1, cls=TCLink, delay='100ms', bw=1) net.addLink(s1, d2) info('*** Starting network\n') net.start() #info('*** Testing connectivity\n') #net.ping([d1, d2]) CLI(net) info('*** Stopping network') net.stop()
import time from mininet.net import Containernet from mininet.node import Controller, RemoteController from mininet.cli import CLI from mininet.link import TCLink from mininet.log import info, setLogLevel from functools import partial from vn import install_vns, init_vns setLogLevel('info') net = Containernet(controller=partial(RemoteController, ip='172.17.0.1', port=6633), autoSetMacs=True) info('*** Adding controller\n') net.addController('c0') info('*** Adding gateways\n') gw1 = net.addDocker('gw1', ip='210.0.0.200', mac="00:00:00:00:00:01", dimage="mg-ids", pubnet=True) gw2 = net.addDocker('gw2', ip='210.0.0.200',
def tfTopo():
    """Resource-interference benchmark (Python 2).

    h1/h2 exchange iperf3 traffic through a Click firewall container (p1)
    spliced into the path with static OpenFlow rules, while a sequence of
    progrium/stress containers attacks CPU / memory / disk / IO, with and
    without cgroup limits, and meas.sh records the impact.
    """
    net = Containernet(topo=None, controller=RemoteController, switch=OVSKernelSwitch)
    net.addController('c0', RemoteController, ip="127.0.0.1", port=6633)

    #Arguments
    opts, args = getopt.getopt(sys.argv[1:], "", ["flows=", "dos="])
    for o, a in opts:
        if o == "--flows":
            # NOTE(review): number_of_flows / number_of_dos are parsed but
            # never used in this function.
            number_of_flows = int(a)
            print "Flows: ", a
        # NOTE(review): ("--dos") is NOT a tuple -- this is a substring
        # test on the string "--dos".  It only works because getopt yields
        # the exact option text; should be `o == "--dos"`.
        elif o in ("--dos"):
            number_of_dos = int(a)
            print "DoS: ", a

    # Hosts
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    # Click firewall container with cgroup resource caps.
    p1 = net.addHost('p1', ip='10.0.1.1', mac='00:00:00:00:01:01', cls=Docker,
                     dimage='gmiotto/click', mem_limit=1024 * 1024 * 1024,
                     cpu_shares=1024, cpu_quota=pop_cpu_percentage * 100,
                     cpu_period=10000, device_write_bps='/dev/sda:512mb',
                     device_write_iops='/dev/sda:1000')
    #p2 = net.addHost('p2', ip='10.0.1.2', mac='00:00:00:00:01:02', cls=Docker, dimage='progrium/stress',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)

    #Switches
    s1 = net.addSwitch('s1')

    #PoP Hosts
    #net.addLink(p1,s1, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    # p1 is linked twice on purpose: one interface in, one out, so traffic
    # can be steered through the Click function (ports 1 and 2 on s1).
    net.addLink(p1, s1)
    net.addLink(p1, s1)
    #Normal Hosts
    net.addLink(h1, s1)
    net.addLink(h2, s1)
    net.start()

    # Static flows: h2(4)->p1-in(1), p1-out(2)->h1(3), h1(3)->p1-out(4)
    # (port numbers follow the addLink order above).
    call("sudo ovs-ofctl add-flow s1 in_port=4,actions=output:1", shell=True)
    call("sudo ovs-ofctl add-flow s1 in_port=2,actions=output:3", shell=True)
    call("sudo ovs-ofctl add-flow s1 in_port=3,actions=output:4", shell=True)

    # Disable TSO on the plain hosts so iperf segments are real packets.
    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
    # Start the Click firewall function inside p1.
    for host in net.hosts:
        if "p1" in host.name:
            call("sudo bash Click/runClickFunction.sh %s Click/firewall.click " % host.name, shell=True)

    test_duration = 60
    interval_duration = 5
    # cgroup caps applied to the attacking stress containers in the
    # "with cgroups" runs.
    cgroup_options = "--cpu-quota=5000 --cpu-period=10000 --memory='1073741824' --device-write-bps='/dev/sda:512mb' --device-write-iops='/dev/sda:1000' --device-read-bps='/dev/sda:512mb' --device-read-iops='/dev/sda:1000' --memory-swappiness='0' --shm-size='0'"

    h2.cmd('iperf3 -s &')
    time.sleep(5)

    # Baseline run: no attack (meas.sh args: <atk> <cgroups> <extra>).
    h1.cmd('sudo bash Measurement/meas.sh 0 0 0 %s >> Results/results.dat & ' % test_duration)
    time.sleep(test_duration)
    time.sleep(interval_duration)

    print "CPU ATK 1 0 0"
    #CPU ATK, no cgroups
    h1.cmd('sudo bash Measurement/meas.sh 1 0 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm -it progrium/stress --cpu 20 --timeout %ss" % test_duration, shell=True)
    time.sleep(interval_duration)

    print "CPU ATK 1 1 0"
    #CPU ATK, with cgroups at 50%
    h1.cmd('sudo bash Measurement/meas.sh 1 1 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm %s -it progrium/stress --cpu 20 --timeout %ss" % (cgroup_options, test_duration), shell=True)
    time.sleep(interval_duration)

    print "MEM ATK 2 0 0"
    #MEM ATK, no cgroups
    h1.cmd('sudo bash Measurement/meas.sh 2 0 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm -it progrium/stress --vm 4 --vm-bytes 256M --timeout %ss" % test_duration, shell=True)
    time.sleep(interval_duration)

    print "MEM ATK 2 1 0"
    #MEM ATK, with cgroups
    h1.cmd('sudo bash Measurement/meas.sh 2 1 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm %s -it progrium/stress --vm 4 --vm-bytes 256M --timeout %ss" % (cgroup_options, test_duration), shell=True)
    time.sleep(interval_duration)

    print "HD ATK 3 0 0"
    #HDD ATK, no cgroups
    h1.cmd('sudo bash Measurement/meas.sh 3 0 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm -it progrium/stress --hdd 10 --hdd-bytes 1048576 --timeout %ss" % test_duration, shell=True)
    time.sleep(interval_duration)

    print "HD ATK 3 1 0"
    #HDD ATK, with cgroups
    h1.cmd('sudo bash Measurement/meas.sh 3 1 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm %s -it progrium/stress --hdd 10 --hdd-bytes 1048576 --timeout %ss" % (cgroup_options, test_duration), shell=True)
    time.sleep(interval_duration)

    print "NET ATK 4 0 0"
    #NET IO ATK, no cgroups
    h1.cmd('sudo bash Measurement/meas.sh 4 0 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm -it progrium/stress --io 10 --timeout %ss" % test_duration, shell=True)
    time.sleep(interval_duration)

    print "NET ATK 4 1 0"
    #NET IO ATK, no cgroups
    h1.cmd('sudo bash Measurement/meas.sh 4 1 0 %s >> Results/results.dat & ' % test_duration)
    call("sudo docker run --rm %s -it progrium/stress --io 10 --timeout %ss" % (cgroup_options, test_duration), shell=True)
    time.sleep(interval_duration)

    #CLI(net)
    net.stop()
def topology():
    """Create a network with some docker containers acting as hosts.

    Demonstrates Containernet features: resource-limited containers,
    volumes/port bindings, a second interface on a running container,
    extended ping with manual destination IPs, and adding a container at
    runtime.  Ends in the CLI, then stops the network.
    """
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController('c0')

    info('*** Adding hosts\n')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')

    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
    # d2 is CPU-limited: 25000/50000 quota/period = half a core.
    d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty",
                       cpu_period=50000, cpu_quota=25000)
    # Containers can also be added via addHost with cls=Docker.
    d3 = net.addHost('d3', ip='11.0.0.253', cls=Docker,
                     dimage="ubuntu:trusty", cpu_shares=20)
    # using advanced features like volumes and exposed ports
    d5 = net.addDocker('d5', dimage="ubuntu:trusty",
                       volumes=["/:/mnt/vol1:rw"], ports=[9999],
                       port_bindings={9999: 9999}, publish_all_ports=True)

    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2', cls=OVSSwitch)
    s3 = net.addSwitch('s3')

    info('*** Creating links\n')
    net.addLink(h1, s1)
    net.addLink(s1, d1)
    net.addLink(h2, s2)
    net.addLink(d2, s2)
    net.addLink(s1, s2)
    #net.addLink(s1, s2, cls=TCLink, delay="100ms", bw=1, loss=10)
    # try to add a second interface to a docker container
    net.addLink(d2, s3, params1={"ip": "11.0.0.254/8"})
    net.addLink(d3, s3)

    info('*** Starting network\n')
    net.start()
    net.ping([d1, d2])
    # our extended ping functionality
    net.ping([d1], manualdestip="10.0.0.252")
    net.ping([d2, d3], manualdestip="11.0.0.254")

    info('*** Dynamically add a container at runtime\n')
    d4 = net.addDocker('d4', dimage="ubuntu:trusty")
    # we have to specify a manual ip when we add a link at runtime
    net.addLink(d4, s1, params1={"ip": "10.0.0.254/8"})
    # other options to do this
    #d4.defaultIntf().ifconfig("10.0.0.254 up")
    #d4.setIP("10.0.0.254")
    net.ping([d1], manualdestip="10.0.0.254")

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
def run():
    """Test linux router: two LinuxRouter hosts joined back-to-back.

    r1 fronts 10.0.0.0/24 behind s1, r2 fronts 10.1.0.0/24 behind s2, and
    the routers are linked directly on transfer net 10.100.0.0/24.  Hosts
    d1 (10.0.0.251) and d2 (10.1.0.252) should reach each other once the
    static routes are installed and forwarding is enabled.

    (A third router r3 and an EMQX-broker Docker variant existed here as
    commented-out code; removed for clarity -- see VCS history.)
    """
    net = Containernet(controller=Controller)  # controller is used by s1-s3
    net.addController('c0', port=6654)
    defaultIP = '10.0.0.1/24'  # IP address for r0-eth1

    info('*** Adding routers\n')
    r1 = net.addHost('r1', cls=LinuxRouter, ip='10.0.0.1/24')
    r2 = net.addHost('r2', cls=LinuxRouter, ip='10.1.0.1/24')

    info('*** Adding switches\n')
    s1, s2, s3 = [net.addSwitch(s) for s in ('s1', 's2', 's3')]

    info('*** Adding host-switch links\n')
    net.addLink(s1, r1, intfName2='r1-eth1', params2={'ip': '10.0.0.1/24'})
    net.addLink(s2, r2, intfName2='r2-eth1', params2={'ip': '10.1.0.1/24'})

    info('*** 1) Adding switch-switch link\n')
    # Direct router-to-router link on the 10.100.0.0/24 transfer net.
    net.addLink(r1, r2, intfName1='r1-eth2', intfName2='r2-eth2',
                params1={'ip': '10.100.0.1/24'},
                params2={'ip': '10.100.0.2/24'})

    # FIX: the next hop must be the *peer's* transfer-net address, not the
    # router's own.  The original pointed r1 at 10.100.0.1 (r1's own intf)
    # and r2 at 10.100.0.2 (r2's own intf), so neither route was usable
    # and cross-subnet pings failed.
    r1.cmd("ip route add 10.1.0.0/24 via 10.100.0.2")
    r2.cmd("ip route add 10.0.0.0/24 via 10.100.0.1")
    # Enable IPv4 forwarding on both routers.
    r1.cmd("echo 1 > /proc/sys/net/ipv4/ip_forward")
    r2.cmd("echo 1 > /proc/sys/net/ipv4/ip_forward")

    # End hosts, one per subnet, default-routing through their router.
    d1 = net.addHost(name='d1', ip='10.0.0.251/24', defaultRoute='via 10.0.0.1')
    d2 = net.addHost(name='d2', ip='10.1.0.252/24', defaultRoute='via 10.1.0.1')

    for d, s in [(d1, s1), (d2, s2)]:
        info(net.addLink(d, s))

    info('*** Starting network\n')
    net.start()
    net.staticArp()

    info('*** Routing Table on Router:\n')
    print((net['r1'].cmd('route')))
    info('*** Routing Table on Router:\n')
    print((net['r2'].cmd('route')))

    info('*** Testing connectivity\n')
    net.pingAll()

    info('*** Starting brokers\n')
    # d1.start() / d2.start() apply only to the removed Docker variant.
    CLI(net)
    net.stop()
#!/usr/bin/python """ This topology is used to test the compatibility of different Docker images. The images to be tested can be found in 'examples/example-containers'. They are build with './build.sh' """ from mininet.net import Containernet from mininet.node import Controller from mininet.cli import CLI from mininet.link import TCLink from mininet.log import info, setLogLevel setLogLevel('info') net = Containernet(controller=Controller) info('*** Adding controller\n') net.addController('c0') info('*** Adding docker containers\n') d1 = net.addDocker('d1', dimage="ubuntu:trusty") d2 = net.addDocker('d2', dimage="containernet_example:ubuntu1404") d3 = net.addDocker('d3', dimage="containernet_example:ubuntu1604") d4 = net.addDocker('d4', dimage="containernet_example:ubuntu1804") d5 = net.addDocker('d5', dimage="containernet_example:centos6") d6 = net.addDocker('d6', dimage="containernet_example:centos7") d7 = net.addDocker('d7', dimage="containernet_example:lamp") d8 = net.addDocker('d8', dimage="containernet_example:haproxy") d9 = net.addDocker('d9', dimage="containernet_example:ubuntup4") info('*** Adding switches\n') s1 = net.addSwitch('s1')
#!/usr/bin/python from mininet.net import Containernet from mininet.node import Controller from mininet.cli import CLI from mininet.log import info, setLogLevel from mininet.bmv2 import ONOSBmv2Switch, P4DockerHost setLogLevel('info') class NormalP4Switch(ONOSBmv2Switch): def __init__(self, name, **kwargs): ONOSBmv2Switch.__init__(self, name, **kwargs) self.netcfg = False net = Containernet(controller=Controller, switch=NormalP4Switch) info('*** Adding controller\n') net.addController('c0') info('*** Adding docker containers\n') # Fake HOST d1 = net.addDocker('d1', cls=P4DockerHost, ip='192.168.1.100', dimage="containernet_example:ubuntup4", mac="00:00:00:00:00:01") d1.start() # HOST h1 = net.addHost('h1', ip='192.168.1.104', mac="00:00:00:00:00:04")
def _create_network(self):
    """Create the Containernet instance (stored on ``self.net``) with a
    local controller 'c0' and static ARP resolution.

    NOTE(review): the commented-out variant attached a remote OpenFlow 1.3
    controller on 127.0.0.1:6653 instead of the local one.
    """
    # self.net = Containernet(autoStaticArp=True, controller=RemoteController)
    # c1 = RemoteController( 'c1', ip='127.0.0.1', port=6653, protocols=["OpenFlow13"] )
    # self.net.addController(c1)
    self.net = Containernet(autoStaticArp=True, controller=Controller)
    self.net.addController('c0')
def tfTopo():
    """DPI-variant interference topology (Python 2).

    Same shape as the firewall benchmark: h1/h2 behind s1 with a Click
    container (p1) spliced into the path, but running Click/DPI.click and
    different static flow rules; sets up an iperf3 server and drops into
    the CLI instead of running the timed attack sequence.
    """
    net = Containernet(topo=None, controller=RemoteController, switch=OVSKernelSwitch)
    net.addController('c0', RemoteController, ip="127.0.0.1", port=6633)

    #Arguments
    opts, args = getopt.getopt(sys.argv[1:], "", ["flows=", "dos="])
    for o, a in opts:
        if o == "--flows":
            # NOTE(review): number_of_flows / number_of_dos are parsed but
            # never used in this function.
            number_of_flows = int(a)
            print "Flows: ", a
        # NOTE(review): ("--dos") is NOT a tuple -- this is a substring
        # test on "--dos" that only works because getopt yields the exact
        # option text; should be `o == "--dos"`.
        elif o in ("--dos"):
            number_of_dos = int(a)
            print "DoS: ", a

    # Hosts
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    # Click DPI container with cgroup resource caps.
    p1 = net.addHost('p1', ip='10.0.1.1', mac='00:00:00:00:01:01', cls=Docker,
                     dimage='gmiotto/click', mem_limit=1024 * 1024 * 1024,
                     cpu_shares=1024, cpu_quota=pop_cpu_percentage * 100,
                     cpu_period=10000, device_write_bps='/dev/sda:512mb',
                     device_write_iops='/dev/sda:1000')
    #p2 = net.addHost('p2', ip='10.0.1.2', mac='00:00:00:00:01:02', cls=Docker, dimage='progrium/stress',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)

    #Switches
    s1 = net.addSwitch('s1')

    #PoP Hosts
    #net.addLink(p1,s1, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    # p1 is linked twice on purpose: one interface in, one out, so traffic
    # can be steered through the Click function (ports 1 and 2 on s1).
    net.addLink(p1, s1)
    net.addLink(p1, s1)
    #Normal Hosts
    net.addLink(h1, s1)
    net.addLink(h2, s1)
    net.start()

    # Static flows steering h1<->h2 traffic through the DPI container
    # (port numbers follow the addLink order above).
    call("sudo ovs-ofctl add-flow s1 in_port=3,actions=output:1", shell=True)
    call("sudo ovs-ofctl add-flow s1 in_port=2,actions=output:4", shell=True)
    call("sudo ovs-ofctl add-flow s1 in_port=4,actions=output:3", shell=True)

    # Disable TSO on the plain hosts so iperf segments are real packets.
    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
    # Start the Click DPI function inside p1.
    for host in net.hosts:
        if "p1" in host.name:
            call("sudo bash Click/runClickFunction.sh %s Click/DPI.click " % host.name, shell=True)

    # NOTE(review): the following are prepared but unused here -- this
    # variant ends in the CLI rather than running the measurement loop.
    test_duration = 20
    interval_duration = 5
    cgroup_options = "--cpu-quota=5000 --cpu-period=10000 --memory='1073741824' --device-write-bps='/dev/sda:512mb' --device-write-iops='/dev/sda:1000' --device-read-bps='/dev/sda:512mb' --device-read-iops='/dev/sda:1000' --memory-swappiness='0' --shm-size='0'"

    h2.cmd('iperf3 -s &')
    CLI(net)
    net.stop()