def __init__(self, n=2, **opts):
    """Build a topology with one Internet-facing switch/host pair and
    n private subnets, each sitting behind its own NAT node."""
    Topo.__init__(self, **opts)
    # Internet-side switch and host
    uplinkSwitch = self.addSwitch('s0')
    uplinkHost = self.addHost('h0')
    self.addLink(uplinkSwitch, uplinkHost)
    # One NAT'd local network per index
    for i in irange(1, n):
        outIf, inIf = 'nat%d-eth0' % i, 'nat%d-eth1' % i
        gwIP = '192.168.%d.1' % i
        # NAT node bridging the local subnet to the uplink switch
        natNode = self.addNode('nat%d' % i, cls=NAT,
                               subnet='192.168.%d.0/24' % i,
                               inetIntf=outIf, localIntf=inIf)
        localSwitch = self.addSwitch('s%d' % i)
        # NAT uplink towards the Internet switch
        self.addLink(natNode, uplinkSwitch, intfName1=outIf)
        # NAT local leg carries the subnet's gateway address
        self.addLink(natNode, localSwitch, intfName1=inIf,
                     params1={'ip': '%s/24' % gwIP})
        # Single host on the local subnet, default route via the NAT
        localHost = self.addHost('h%d' % i,
                                 ip='192.168.%d.100/24' % i,
                                 defaultRoute='via %s' % gwIP)
        self.addLink(localHost, localSwitch)
def __init__(self, hosts=2, bwlimit=10, lat=0.1, **opts):
    """Binary-tree topology: one leaf switch per host, then pairwise
    aggregation switches up to a single root. Link bandwidth doubles
    at each level above the leaves; every link shares the same latency."""
    Topo.__init__(self, **opts)
    delay = str(lat) + "ms"
    bw = bwlimit
    s = 1
    # Leaf level: each host attaches to its own switch.
    level = []
    for i in range(hosts):
        h = self.addHost('h' + str(i + 1), mac=self.makeMAC(i),
                         ip="10.0.0." + str(i + 1))
        sw = self.addSwitch('s' + str(s), dpid=self.makeDPID(s),
                            listenPort=(13000 + s - 1))
        s = s + 1
        self.addLink(h, sw, bw=bw, delay=delay)
        level.append(sw)
    # Pair switches level by level until only the root remains.
    while len(level) > 1:
        parents = []
        for i in range(0, len(level), 2):
            sw = self.addSwitch('s' + str(s), dpid=self.makeDPID(s),
                                listenPort=(13000 + s - 1))
            s = s + 1
            parents.append(sw)
            self.addLink(level[i], sw, bw=bw, delay=delay)
            # Odd count: the last switch has no sibling to pair with.
            if len(level) > (i + 1):
                self.addLink(level[i + 1], sw, bw=bw, delay=delay)
        level = parents
        bw = 2.0 * bw
def __init__(self, n, **kwargs):
    """Two hosts attached to a single switch by n parallel links each."""
    Topo.__init__(self, **kwargs)
    h1 = self.addHost('h1')
    h2 = self.addHost('h2')
    s1 = self.addSwitch('s1')
    # n redundant links from the switch to each host
    for _ in range(n):
        self.addLink(s1, h1)
        self.addLink(s1, h2)
def __init__(self, n=2, hopts=None, lopts=None):
    """Single switch connected to n hosts.

    hopts/lopts are default host and link option dictionaries passed
    through to Topo; None or empty means no defaults.
    """
    hopts = hopts or {}
    lopts = lopts or {}
    Topo.__init__(self, hopts=hopts, lopts=lopts)
    sw = self.addSwitch('s1')
    # Hosts are numbered h1..hn
    for idx in range(1, n + 1):
        self.addLink(self.addHost('h%s' % idx), sw)
def __init__(self, n, dataController=DataController, **kwargs):
    """n: number of data network controller nodes
    dataController: class for data network controllers"""
    Topo.__init__(self, **kwargs)
    # Single switch that every controller node hangs off of
    cs0 = self.addSwitch('cs0')
    # Controller nodes are modeled as hosts running in their own namespace
    for i in range(n):
        ctrl = self.addHost('c%s' % i, cls=dataController,
                            inNamespace=True)
        self.addLink(ctrl, cs0)
    # Root-namespace host so data network switches can reach us
    rootHost = self.addHost('root', inNamespace=False)
    self.addLink(rootHost, cs0)
def __init__(self):
    "Create custom topo."
    Topo.__init__(self)
    # Two hosts on two switches joined by a single inter-switch link:
    # h1 -- s3 -- s4 -- h2
    hosts = [self.addHost(name) for name in ('h1', 'h2')]
    switches = [self.addSwitch(name) for name in ('s3', 's4')]
    self.addLink(hosts[0], switches[0])
    self.addLink(switches[0], switches[1])
    self.addLink(switches[1], hosts[1])
def testActualDpidAssignment(self):
    """Verify that Switch dpid is the actual dpid assigned if dpid is
    passed in switch creation."""
    expected = self.dpidFrom(0xABCD)
    net = Mininet(Topo(), self.switchClass, Host, Controller)
    sw = net.addSwitch('s1', dpid=expected)
    self.assertEqual(sw.dpid, expected)
def addLink(self, node1, node2, port1=None, port2=None, key=None, **opts):
    """Add a link, transparently unwrapping VirtualInstance endpoints
    to their underlying switches before delegating to Topo.addLink."""
    endpoints = []
    for node in (node1, node2):
        if isinstance(node, VirtualInstance):
            node = node.getSwitch()
        endpoints.append(node)
    return Topo.addLink(self, endpoints[0], endpoints[1],
                        port1, port2, key, **opts)
def testDefaultDpidLen(self):
    """Verify that Default dpid length is 16 characters consisting of
    16 - len(hex of first string of contiguous digits passed in switch
    name) 0's followed by hex of first string of contiguous digits
    passed in switch name."""
    net = Mininet(Topo(), self.switchClass, Host, Controller)
    sw = net.addSwitch('s123')
    # '123' is the first digit run in the name, so dpid derives from 123
    self.assertEqual(sw.dpid, self.dpidFrom(123))
def testDefaultDpidAssignmentFailure(self):
    """Verify that Default dpid assignment raises an Exception if the
    name of the switch does not contain a digit. Also verify the
    exception message."""
    with self.assertRaises(Exception) as raises_cm:
        # A name with no digits cannot yield a default dpid
        Mininet(Topo(), self.switchClass, Host,
                Controller).addSwitch('A')
    self.assertEqual(
        raises_cm.exception.message, 'Unable to derive '
        'default datapath ID - please either specify a dpid '
        'or use a canonical switch name such as s23.')
def __init__(self, N, **params):
    """Linear chain of N-1 switches carrying N hosts: the first switch
    serves both h1 and h2, every later switch serves one host."""
    Topo.__init__(self, **params)
    hosts = [self.addHost('h%s' % h) for h in irange(1, N)]
    switches = [self.addSwitch('s%s' % s) for s in irange(1, N - 1)]
    # Chain consecutive switches together
    for left, right in zip(switches, switches[1:]):
        self.addLink(left, right)
    # h1 joins the first switch; each remaining host pairs with a switch
    self.addLink(hosts[0], switches[0])
    for host, switch in zip(hosts[1:], switches):
        self.addLink(host, switch)
def __init__(self, n=2, lossy=True, **opts):
    """Single-switch topology with CPU-limited hosts and shaped links.

    n: number of hosts (each gets 50%/n of system CPU)
    lossy: if True, each link drops 10% of packets; otherwise lossless
    opts: passed through to Topo.__init__
    """
    Topo.__init__(self, **opts)
    switch = self.addSwitch('s1')
    # The two original branches differed only in the loss value, so
    # compute it once instead of duplicating the addLink call.
    loss = 10 if lossy else 0
    for h in range(n):
        # Each host gets 50%/n of system CPU
        host = self.addHost('h%s' % (h + 1), cpu=.5 / n)
        # 10 Mbps, 5ms delay, loss as configured above
        self.addLink(host, switch, bw=10, delay='5ms',
                     loss=loss, use_htb=True)
def _parse_metis_result(self, filepath, n):
    """Read a METIS partition file and distribute switches accordingly.

    filepath: path to the METIS output (one partition id per line;
              line k corresponds to vertex k, which self.pos maps
              back to a switch name)
    n: number of partitions that were requested

    Appends one Topo per partition to self.partitions, adds each
    switch to its partition's Topo, then wires up the links.
    """
    for _ in range(n):
        self.partitions.append(Topo())
    switch_to_part = {}
    # 'with' guarantees the file is closed even if int(line) raises;
    # the original left the handle open on that path.
    with open(filepath, "r") as f:
        # METIS vertex ids are 1-based, hence start=1.
        for i, line in enumerate(f, start=1):
            part = int(line)
            switch_to_part[self.pos[i]] = part
            self.partitions[part].addNode(
                self.pos[i], **self.topo.nodeInfo(self.pos[i]))
    self._add_links(switch_to_part)
def partition_using_map(self, mapping): """ Partition loaded topology without metis but with mapping dictionary. Dictionary has to contain reference "nodename"->workerid for every node in topology. """ self.tunnels = [] self.partitions = [] for i in range(0, max(mapping.values()) + 1): self.partitions.append(Topo()) print mapping switch_to_part = {} for switch in self.switches: if (not switch in mapping): raise RuntimeError("no mapping for " + switch + " found") switch_to_part[switch] = mapping[switch] self.partitions[mapping[switch]].addNode( switch, **self.topo.nodeInfo(switch)) self._add_links(switch_to_part) return Clustering(self.partitions, self.tunnels)
def partition(self, n, shares=None):
    """Partition loaded topology into n partitions.

    Args:
        n: Number of partitions to create
        shares: list of workload shares for each partition. Must add up to 1
    """
    self.tunnels = []
    self.partitions = []
    # Only invoke METIS when there is actually something to split.
    if (n > 1 and len(self.switches) > 1):
        if (shares):
            # Build a METIS target-partition-weights file: one
            # "<part> = <share>" line per partition.
            tpw = ""
            for i in range(0, n):
                tpw += str(i) + " = " + str(shares[i]) + "\n"
            tpwf = self._write_to_file(tpw)
            # NOTE(review): command is string-concatenated and run with
            # shell=True; if self.graph or shares ever come from
            # untrusted input this is shell-injectable -- consider a
            # list argv with shell=False. Left as-is here.
            outp = subprocess.check_output([
                self.metisCMD + " -tpwgts=" + tpwf + " " + self.graph
                + " " + str(n)
            ], shell=True)
            os.remove(tpwf)
        else:
            # Unweighted partitioning: equal shares.
            outp = subprocess.check_output(
                [self.metisCMD + " " + self.graph + " " + str(n)],
                shell=True)
        self.logger.debug(outp)
        # METIS writes its result next to the graph file as
        # "<graph>.part.<n>"; parse it, then clean up both files.
        self._parse_metis_result(self.graph + ".part." + str(n), n)
        os.remove(self.graph + ".part." + str(n))
        os.remove(self.graph)
    else:
        # Trivial case: everything in partition 0, pad with empty
        # Topos so exactly n partitions are returned.
        tpart = [self._convert_to_plain_topo(self.topo)]
        while (len(tpart) < n):
            tpart.append(Topo())
        self.partitions = tpart
    return Clustering(self.partitions, self.tunnels)
def _convert_to_plain_topo(self, topo):
    """Convert topo to mininet.topo.Topo instance.

    This helper function allows the user to use topologys which are
    not direct instances of mininet.topo.Topo in MaxiNet. If the
    topology was not converted to a Topo instance the transfer via
    pyro most likely fails as the original class might not be
    available at the pyro remote.

    Args:
        topo: Instance which fullfills the interface of
            mininet.topo.Topo.

    Returns:
        Instance of mininet.topo.Topo,
    """
    plain = Topo()
    # Copy every node together with its stored options.
    for name in topo.nodes():
        plain.addNode(name, **topo.nodeInfo(name))
    # Copy every link; linkInfo is assumed to carry the endpoint
    # names among its options, as addLink receives no positionals.
    for edge in topo.links():
        plain.addLink(**topo.linkInfo(edge[0], edge[1]))
    return plain
# between a switch and a host if these are emulated at DIFFERENT workers # This limitation does (of course) NOT hold for links between switches. # # Dynamic adding and removing of nodes also does not work when using the # UserSwitch. import time from src.mininet.topo import Topo from src.mininet.node import OVSSwitch from src.maxinet.Frontend import maxinet_main from src.maxinet.tools import Tools # create topology topo = Topo() topo.addHost("h1", ip=Tools.makeIP(1), mac=Tools.makeMAC(1)) topo.addHost("h2", ip=Tools.makeIP(2), mac=Tools.makeMAC(2)) topo.addSwitch("s1", dpid=Tools.makeDPID(1)) topo.addLink("h1", "s1") topo.addLink("h2", "s1") # start cluster cluster = maxinet_main.Cluster(minWorkers=2, maxWorkers=2) # start experiment with OVSSwitch on cluster exp = maxinet_main.Experiment(cluster, topo, switch=OVSSwitch) exp.setup() print "waiting 5 seconds for routing algorithms on the controller to converge" time.sleep(5)
def testDefaultDpid(self):
    """Verify that the default dpid is assigned using a valid provided
    canonical switchname if no dpid is passed in switch creation."""
    net = Mininet(Topo(), self.switchClass, Host, Controller)
    sw = net.addSwitch('s1')
    # With no explicit dpid, the assigned dpid must be the default one
    self.assertEqual(sw.defaultDpid(), sw.dpid)
#!/usr/bin/env python2 """ An example showing all available methods and attributes of the NodeWrapper for Docker hosts. """ from src.maxinet.Frontend import maxinet_main from src.maxinet.Frontend.container import Docker from src.mininet.topo import Topo from src.mininet.node import OVSSwitch topo = Topo() d1 = topo.addHost("d1", cls=Docker, ip="10.0.0.251", dimage="ubuntu:trusty") cluster = maxinet_main.Cluster() exp = maxinet_main.Experiment(cluster, topo, switch=OVSSwitch) exp.setup() try: node_wrapper = exp.get_node("d1") print("Testing methods:") print("=================") print("updateCpuLimit():") print("\t" + str(node_wrapper.updateCpuLimit( 10000, 10000, 1, "0-1"))) # cpu_quota, cpu_period, cpu_shares, cores print("updateMemoryLimit():") print("\t" + str(node_wrapper.updateMemoryLimit(300000))) print("cgroupGet():") print("\t" + str(node_wrapper.cgroupGet('cpus', resource='cpuset')))
#!/usr/bin/env python2 """ An example showing all available methods and attributes of the NodeWrapper for Docker hosts. """ from src.maxinet.Frontend import maxinet_main from src.maxinet.Frontend.libvirt import LibvirtHost from src.mininet.topo import Topo from src.mininet.node import OVSSwitch topo = Topo() vm1 = topo.addHost("vm1", cls=LibvirtHost, ip="10.0.0.251", disk_image="/srv/images/ubuntu16.04.qcow2") cluster = maxinet_main.Cluster() exp = maxinet_main.Experiment(cluster, topo, switch=OVSSwitch) exp.setup() try: node_wrapper = exp.get_node("vm1") print("Testing methods:") print("=================") print("updateCpuLimit():") print("\t" + str(node_wrapper.updateCpuLimit(10000, 10000, 1, { 0: "1", 1: "0" }))) # cpu_quota, cpu_period, cpu_shares, cores
def __init__(self, *args, **kwargs):
    """Initialize the underlying Topo and attach a ResourcesTable.

    All positional and keyword arguments are forwarded unchanged to
    Topo.__init__.
    """
    Topo.__init__(self, *args, **kwargs)
    # Per-topology resource bookkeeping
    self.res_table = ResourcesTable()
# Three fog nodes f1..f3, each serving SENSORS_PER_FOG sensor hosts.
# NOTE(review): SENSORS_PER_FOG and info() are defined elsewhere in
# this file -- verify before reuse.
FOG_NODES = ["f{}".format(x + 1) for x in range(3)]
SENSOR_NODES = [
    "h{}".format(x + 1) for x in range(SENSORS_PER_FOG * len(FOG_NODES))
]
fogs = []
sensors = []

MANAGER_ADDR = '10.0.0.1:2000'
# Environment handed to the manager container
MANAGER_ENV = {
    "CLOUDS": 1,
    "FOGS": len(FOG_NODES),
    "SENSORS_PER_FOG": SENSORS_PER_FOG
}

topo = Topo()
switch_idx = 1

info('**** Adding Manager\n')
# Manager container gets the host part of MANAGER_ADDR as its IP
mngr = topo.addDocker('m1',
                      ip=MANAGER_ADDR.split(':')[0],
                      dimage="manager:latest",
                      environment=MANAGER_ENV)
sw_mngr = topo.addSwitch('s%d' % switch_idx)
switch_idx += 1
topo.addLink(mngr, sw_mngr)

info('*** Adding docker containers\n')
info('*** Cloud\n')
c1 = topo.addDocker('c1', ip='10.0.0.2',
def get_attribute(node, attribute):
    """Probe one attribute on a NodeWrapper and print OKAY/FAILED."""
    print(node.nn + "." + attribute)
    ok = False
    try:
        print("\t->" + str(node._get(attribute)))
        ok = True
    except Exception as e:
        print("\tFAILED")
        traceback.print_exc()
    if ok:
        print("\tOKAY")
    # Python 2 bare print: emit a blank separator line
    print

# Two CPU-limited hosts on a single switch
topo = Topo()
topo.addHost("h1", cls=CPULimitedHost, ip="10.0.0.251")
topo.addHost("h2", cls=CPULimitedHost, ip="10.0.0.252")
topo.addSwitch("s1")
topo.addLink("h1", "s1")
topo.addLink("h2", "s1")

cluster = maxinet_main.Cluster()
exp = maxinet_main.Experiment(cluster, topo, switch=OVSSwitch)
exp.setup()

# Wrapper handles for the emulated nodes
h1 = exp.get_node("h1")
h2 = exp.get_node("h2")
s1 = exp.get_node("s1")

print(