def cfsInfo( self, f ):
    "Internal method: return parameters for CFS bandwidth"
    pstr, qstr = 'cfs_period_us', 'cfs_quota_us'
    # CFS uses wall clock time for period and CPU time for quota.
    quota = int( self.period_us * f * numCores() )
    period = self.period_us
    if f > 0 and quota < 1000:
        debug( '(cfsInfo: increasing default period) ' )
        quota = 1000
        period = int( quota / f / numCores() )
    return pstr, qstr, period, quota
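# A minimal, hypothetical sketch of how cfsInfo()'s result could be applied,
# assuming the cgroup-v1 'cpu' controller layout that these parameter names
# (cpu.cfs_period_us / cpu.cfs_quota_us) correspond to; the cgroup path and
# helper below are illustrative and are not part of the original code.
import os

def applyCFSLimit( cgroup, pstr, qstr, period, quota,
                   base='/sys/fs/cgroup/cpu' ):
    "Write CFS period/quota (in microseconds) into a host's cpu cgroup."
    for name, value in ( ( pstr, period ), ( qstr, quota ) ):
        path = os.path.join( base, cgroup, 'cpu.%s' % name )
        with open( path, 'w' ) as f:
            f.write( '%d\n' % value )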
def parseOptions():
    "Parse command line options"
    parser = OptionParser()
    parser.add_option('-o', '--output', default=True, action='store_true',
                      help='write output to file?')
    parser.add_option('-t', '--time', type='int', default=10,
                      help='cpu-stress time interval')
    parser.add_option('-r', '--runs', type='int', default=1,
                      help='Runs for each topo')
    parser.add_option('-c', '--counts', action='callback',
                      callback=intListCallback, default=[2, 4],
                      type='string',
                      help='nodes in the network, e.g. 2,4')
    parser.add_option('-u', '--utils', action='callback',
                      callback=floatListCallback, default=[0.5, 0.7, .9],
                      type='string',
                      help='target machine utilizations, e.g. .5,.7,.9')
    parser.add_option('-m', '--machine', default='local', type='string',
                      help='name of machine')
    parser.add_option('-e', '--experiment', default='', type='string',
                      help='name of experiment')
    parser.add_option('-s', '--static', default=False, action='store_true',
                      help='statically allocate CPU to each host')
    parser.add_option('-b', '--bwsched', dest='sched', default='cfs',
                      help='bandwidth scheduler: cfs (default) | rt | none')
    options, args = parser.parse_args()
    if options.sched not in ['cfs', 'rt', 'none']:
        print "CPU bandwidth scheduler should be either 'cfs' or 'rt' or 'none'."
        parser.print_help()
        exit(1)
    options.host = quietRun('hostname').strip()
    options.cores = numCores()
    return options, args
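# Hypothetical usage of the options above (the script name is illustrative,
# not taken from the original code):
#
#   sudo python cpu_isolation_sweep.py -c 2,4 -u .5,.9 -b cfs -t 30
#
# would yield options.counts == [2, 4], options.utils == [0.5, 0.9],
# options.sched == 'cfs', options.time == 30, options.cores == numCores(),
# and options.host set to the local hostname.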
def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
              controller=DefaultController, link=Link, intf=Intf,
              build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
              inNamespace=False,
              autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
              listenPort=None, waitConnected=False, startup=None ):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.host = host
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
    hostIP = ( 0xffffffff >> self.prefixLen ) & self.ipBaseNum
    # Start for address allocation
    self.nextIP = hostIP if hostIP > 0 else 1
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.cleanup = cleanup
    self.autoSetMacs = autoSetMacs
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.waitConn = waitConnected
    self.startup = startup
    self.hosts = []
    self.switches = []
    self.controllers = []
    self.links = []
    self.nameToNode = {}  # name to Node (Host/Switch) objects
    self.terms = []  # list of spawned xterm processes
    Mininet.init()  # Initialize Mininet if necessary
    self.built = False
    if topo and build:
        self.build()
def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
              controller=DefaultController, link=Link, intf=Intf,
              build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
              inNamespace=False,
              autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
              listenPort=None, waitConnected=False ):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.host = host
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
    hostIP = ( 0xffffffff >> self.prefixLen ) & self.ipBaseNum
    # Start for address allocation
    self.nextIP = hostIP if hostIP > 0 else 1
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.cleanup = cleanup
    self.autoSetMacs = autoSetMacs
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.waitConn = waitConnected
    self.hosts = []
    self.switches = []
    self.controllers = []
    self.links = []
    self.nameToNode = {}  # name to Node (Host/Switch) objects
    self.terms = []  # list of spawned xterm processes
    Mininet.init()  # Initialize Mininet if necessary
    self.built = False
    if topo and build:
        self.build()
def startRamcloud( self, cpu=.6 ):
    """Start Ramcloud
       cpu: CPU usage limit (in seconds/s)"""
    # Create a cgroup so Ramcloud doesn't eat all of our CPU
    ramcloud = CPULimitedHost( 'ramcloud', inNamespace=False,
                               period_us=5000 )
    ramcloud.setCPUFrac( cpu / numCores() )
    info( '\n' )
    ramcloud.cmd( 'export PATH=%s:$PATH' % self.onosDir )
    ramcloud.cmd( 'export ONOS_LOGDIR=%s' % self.logDir )
    for daemon in 'coord', 'server':
        ramcloud.cmd( 'onos.sh rc-%s start' % daemon )
        pid = self.waitStart( 'Ramcloud %s' % daemon,
                              'obj.master/' + daemon )
        self.waitNetstat( pid )
        status = self.cmd( 'onos.sh rc-%s.sh status' % daemon )
        if 'running' not in status:
            raise Exception( 'Ramcloud %s startup failed: ' % daemon +
                             status )
    self.ramcloud = ramcloud
def appendResults(net, outfile, n, cpu):
    result = [''] * n
    cmd = [None] * n  # Command objects for CPU stressers
    monitor = [None] * n
    monitor_outfile = [None] * n  # Filenames
    cpu_log = [None] * n
    info("Starting CPU stressors\n")
    # Start cpu-stressers
    for i in xrange(0, n):
        server = net.hosts[i]
        # run for 120 secs extra; terminated below
        scmd = '%s %d %d' % (CPUSTRESS, opts.time + 120, 0)
        server.cmd(scmd + '&')
        monitor_outfile[i] = '/tmp/%s_cpu.out' % server.name
    sleep(1)
    info("Starting CPU monitor\n")
    # Start cpu monitor
    startTime = int(time())
    cpumon_length = opts.time
    # Was always one second.
    # Now we will try the following: since cpuacct is adjusted every
    # 10 ms, we should try to make sure that each process makes some
    # progress each time interval.
    # for a minimum cpu time of 20 ms,
    # the interval should be 20 ms * n / (cpu% * numCores())
    cpumon_interval = 1.0
    cpumon_min = .020 / cpu / numCores()
    if cpumon_interval < cpumon_min:
        cpumon_interval = cpumon_min
        print "Adjusting cpumon_interval to %.2f seconds" % cpumon_interval
    hosts = ' '.join([h.name for h in net.hosts])
    stats = quietRun('%s %d %f %s' %
                     (CPUMONITOR, cpumon_length, cpumon_interval, hosts))
    info("Terminating processes\n")
    quietRun('pkill -9 -f ' + CPUSTRESS)
    # parse cpu monitor results
    info("Parsing CPU monitor results\n")
    cpu_usage = parse_cpuacct(stats, cpulimit=cpu)
    appendOutput(outfile, cpu_usage)
def run(sched='cfs', cpu=.05, fastbw=100, lanbw=10, reduce=0):
    "Run test"
    quietRun('pkill -9 iperf')
    topo = EmulabTopo(testbw=fastbw, lanbw=lanbw, cpu=cpu)
    net = Mininet(topo, host=custom(Host, sched=sched, period_us=10000,
                                    isIsolated=(sched != 'none')))
    net.start()
    # Set up routes for extra link
    print [h.name for h in net.hosts]
    nodea, nodeb = net.nameToNode['node-A'], net.nameToNode['node-B']
    nodea.cmdPrint('ifconfig ' + nodea.intfs[1] + ' up')
    nodeb.cmdPrint('ifconfig ' + nodeb.intfs[1] + ' up')
    nodea.setIP(nodea.intfs[1], '10.0.0.11')
    nodeb.setIP(nodeb.intfs[1], '10.0.0.12')
    nodea.cmdPrint('route add -host 10.0.0.12 dev ' + nodea.intfs[1])
    nodeb.cmdPrint('route add -host 10.0.0.11 dev ' + nodeb.intfs[1])
    nodea.cmdPrint('route del -net 10.0.0.0/8')
    nodeb.cmdPrint('route del -net 10.0.0.0/8')
    nodea.cmdPrint('route -n')
    nodeb.cmdPrint('route -n')
    print "*** starting sshd servers"
    for host in net.hosts:
        print host.name, host.IP()
        host.cmdPrint('/usr/sbin/sshd')
    print "*** checking ping and ssh connectivity"
    net.pingAll()
    print "*** fixing local route on nodea"
    nodea.cmdPrint("ifconfig lo up")
    nodea.cmdPrint("ping -c1 " + nodea.IP())
    for suffix in 'ABCDEFGHIJ':
        nodea.cmdPrint('ssh node-' + suffix + ' hostname')
    print "*** running test"
    host = quietRun('hostname').strip()
    cores = numCores()
    outfile = "emulab-%s-%sp-%s-%s-%s.out" % (host, cores, sched, cpu, fastbw)
    nodea.cmdPrint("./emulab-test.sh 2>&1 | tee %s; echo done" % outfile)
    print "stopping sshd servers"
    for host in net.hosts:
        host.cmdPrint('pkill -9 -f /usr/sbin/sshd')
    net.stop()
def __init__(self, topo=None, switch=OVSKernelSwitch, host=Host,
             controller=DefaultController, link=Link, intf=Intf,
             build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
             inNamespace=False, autoSetMacs=False, autoStaticArp=False,
             autoPinCpus=False, listenPort=None, waitConnected=False,
             interfaceID=3, ssid="my_ssid", mode="g", channel="1",
             wirelessRadios=0):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.baseStation = switch
    self.host = host
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse(self.ipBase)
    self.nextIP = 1  # start for address allocation
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.cleanup = cleanup
    self.autoSetMacs = autoSetMacs
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.waitConn = waitConnected
    self.wirelessdeviceControl = []
    self.wirelessifaceControl = []
    self.nextIface = 1
    self.baseStationName = []
    self.stationName = []
    self.interfaceID = interfaceID
    self.ssid = ssid
    self.mode = mode
    self.channel = channel
    self.wirelessRadios = wirelessRadios
    self.isWireless = Node.isWireless
    self.hosts = []
    self.switches = []
    self.baseStations = []
    self.controllers = []
    self.links = []
    self.nameToNode = {}  # name to Node (Host/Switch) objects
    self.terms = []  # list of spawned xterm processes
    Mininet.init()  # Initialize Mininet if necessary
    if (Node.isWireless == True or self.wirelessRadios != 0):
        Node.isWireless = True
        module(self.wirelessRadios, Node.isWireless)  # Initialize WiFi module
    self.built = False
    if topo and build:
        self.build()
def __init__( self, topo=None, switch=OVSKernelSwitch,
              legacySwitch=LegacySwitch, hostInterface=HostInterface,
              legacyRouter=QuaggaRouter,
              transitPortalRouter=TransitPortalRouter,
              host=Host, controller=Controller, link=Link, intf=Intf,
              build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
              inNamespace=False, autoSetMacs=False, autoStaticArp=False,
              autoPinCpus=False, listenPort=None, bridgeNum=0,
              defaultHostInterfaceBind='eth0' ):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.legacySwitch = legacySwitch
    self.hostInterface = hostInterface
    self.legacyRouter = legacyRouter
    self.transitPortalRouter = transitPortalRouter
    self.host = host
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
    self.nextIP = 1  # start for address allocation
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.cleanup = cleanup
    self.autoSetMacs = autoSetMacs
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.bridgeNum = bridgeNum
    self.defaultHostInterfaceBind = defaultHostInterfaceBind
    self.hosts = []
    self.switches = []
    self.legacySwitches = []
    self.hostInterfaces = []
    self.legacyRouters = []
    self.transitPortalRouters = []
    self.controllers = []
    # name to Node (Host/Switch/hostInterface/legacy(Router,Switch)) objects
    self.nameToNode = {}
    # physical adapters connected to a (any) bridge
    self.physicalAdaptersBridged = []
    self.bridgeInterfaces = []  # instantiated bridges
    self.terms = []  # list of spawned xterm processes
    Mininet.init()  # Initialize Mininet if necessary
    self.built = False
    if topo and build:
        self.build()
def parseOptions():
    "Parse command line options"
    parser = OptionParser()
    parser.add_option('-o', '--output', dest='outfile', default=None,
                      help='write output to file')
    parser.add_option('-t', '--time', dest='time', type='int', default=10,
                      help='select cpu-stress time interval')
    parser.add_option('-r', '--runs', dest='runs', type='int', default=1,
                      help='No. of runs for each topo')
    parser.add_option('-u', '--util', dest='cpu', type='float', default=.5,
                      help='fraction of entire system to use (.5)')
    parser.add_option('-i', '--bwinterval', dest='period', type='int',
                      default=100000,
                      help='bw enforcement interval in microseconds')
    parser.add_option('-c', '--counts', dest='counts', action='callback',
                      callback=intListCallback, default=[1], type='string',
                      help='specify pair counts, e.g. 10,20,40')
    # Disabled until we fix for RT:
    # parser.add_option( '-n', '--numprocs', dest='numprocs',
    #     type='int', default=1,
    #     help='no. of cpu-stress processes in each host' )
    parser.add_option('-s', '--static', dest='static', default=False,
                      action='store_true',
                      help='statically allocate CPU to each host')
    parser.add_option('-b', '--bwsched', dest='sched', default='cfs',
                      help='bandwidth scheduler: cfs (default) | rt | none')
    (options, args) = parser.parse_args()
    if options.sched not in ['cfs', 'rt', 'none']:
        print "CPU bandwidth scheduler should be 'cfs' or 'rt' or 'none'."
        parser.print_help()
        exit(1)
    options.host = quietRun('hostname').strip()
    options.cores = numCores()
    # Limited to 1 until we fix for RT:
    options.numprocs = 1
    return options, args
def __init__(self, topo=None, switch=OVSKernelSwitch, accessPoint=OVSKernelAP,
             host=Host, station=Station, car=Car,
             controller=DefaultController, link=Link, intf=Intf,
             build=True, xterms=False, ipBase='2001:0:0:0:0:0:0:0/64',
             inNamespace=False, autoSetMacs=False, autoStaticArp=False,
             autoPinCpus=False, listenPort=None, waitConnected=False,
             ssid="new-ssid", mode="g", channel="1", enable_wmediumd=False,
             enable_interference=False, enable_spec_prob_link=False,
             enable_error_prob=False, fading_coefficient=0,
             disableAutoAssociation=False, driver='nl80211',
             autoSetPositions=False, configureWiFiDirect=False,
             configure4addr=False, defaultGraph=False,
             noise_threshold=-91, cca_threshold=-90, rec_rssi=False):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.host = host
    self.station = station
    self.accessPoint = accessPoint
    self.car = car
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse(self.ipBase)
    self.nextIP = 1  # start for address allocation
    self.nextPosition = 1  # start for position allocation
    self.repetitions = 1  # mobility: number of repetitions
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.autoSetMacs = autoSetMacs
    self.autoSetPositions = autoSetPositions
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.waitConn = waitConnected
    self.routing = ''
    self.ssid = ssid
    self.mode = mode
    self.wmediumd_mode = ''
    self.channel = channel
    self.nameToNode = {}  # name to Node (Host/Switch) objects
    self.aps = []
    self.controllers = []
    self.hosts = []
    self.links = []
    self.cars = []
    self.carsSW = []
    self.carsSTA = []
    self.switches = []
    self.stations = []
    self.sixLP = []
    self.terms = []  # list of spawned xterm processes
    self.driver = driver
    self.disableAutoAssociation = disableAutoAssociation
    self.mobilityKwargs = ''
    self.isMobilityModel = False
    self.isMobility = False
    self.ppm_is_set = False
    self.alreadyPlotted = False
    self.DRAW = False
    self.ifb = False
    self.isVanet = False
    self.noise_threshold = noise_threshold
    self.cca_threshold = cca_threshold
    self.configureWiFiDirect = configureWiFiDirect
    self.configure4addr = configure4addr
    self.enable_wmediumd = enable_wmediumd
    self.enable_error_prob = enable_error_prob
    self.fading_coefficient = fading_coefficient
    self.enable_interference = enable_interference
    self.enable_spec_prob_link = enable_spec_prob_link
    self.mobilityparam = dict()
    self.AC = ''
    self.alternativeModule = ''
    self.rec_rssi = rec_rssi
    self.plot = plot2d
    self.n_wpans = 0
    self.n_radios = 0
    self.min_x = 0
    self.min_y = 0
    self.min_z = 0
    self.max_x = 0
    self.max_y = 0
    self.max_z = 0
    self.nroads = 0
    self.connections = {}
    self.wlinks = []
    Mininet_sixLoWPAN.init()  # Initialize Mininet if necessary
    self.built = False
    if topo and build:
        self.build()
def parse_cpuacct(stats, cpulimit=None):
    '''Return the following:
       cpu_usage[n], with each element a dict{'xvals':[], 'cpuvals':[]}
       cpu_stat[n] with each element a
           dict{'xvals':[], 'uservals': [], 'systemvals': []}'''
    cpu_usage = {}
    rec_re = re.compile(r'cgroup (\S+),time (\d+.\d+)')
    spaces = re.compile(r'\s+')
    host = None
    time = None
    cores = numCores()
    if cpulimit is not None:
        # Report CPU limit in CPU seconds rather than as a fraction (!)
        cpulimit *= cores
    for line in stats.split('\n'):
        m = rec_re.match(line)
        if m:
            host = m.group(1)
            time = float(m.group(2))
        else:
            line = spaces.sub(' ', line).split()
            if 'usage' in line:
                cpuval = float(line[1])
                if host is None or time is None:
                    continue
                if host not in cpu_usage:
                    cpu_usage[host] = {'xvals': [], 'cpuvals': [],
                                       'uservals': [], 'systemvals': [],
                                       'percpuvals': [],
                                       'cpulimit': cpulimit}
                cpu_usage[host]['xvals'].append(time)
                cpu_usage[host]['cpuvals'].append(cpuval)
            elif 'user' in line:
                userval = float(line[1])
                if host is None or time is None:
                    continue
                cpu_usage[host]['uservals'].append(userval)
            elif 'system' in line:
                systemval = float(line[1])
                if host is None or time is None:
                    continue
                cpu_usage[host]['systemvals'].append(systemval)
            elif 'percpu' in line:
                percpuval = map(float, line[1:])
                if host is None or time is None:
                    continue
                cpu_usage[host]['percpuvals'].append(percpuval)
    # Round results to reported (though not necessarily actual)
    # accuracy (ns, HZ)
    def r9(x):
        return round(x, 9)
    def r2(x):
        return round(x, 2)
    for k, v in cpu_usage.iteritems():
        intervals = diff_list(v['xvals'])
        v['xvals'] = [r9(x - v['xvals'][0]) for x in v['xvals']]
        v['xvals'].pop(0)
        v['cpuvals'] = [r9(1e-9 * x / y)
                        for x, y in zip(diff_list(v['cpuvals']), intervals)]
        v['uservals'] = [r2(1e-2 * x / y)
                         for x, y in zip(diff_list(v['uservals']), intervals)]
        v['systemvals'] = [r2(1e-2 * x / y)
                           for x, y in zip(diff_list(v['systemvals']),
                                           intervals)]
        v['percpuvals'] = [[r9(1e-9 * x / y) for x in l]
                           for l, y in zip(double_diff_list(v['percpuvals']),
                                           intervals)]
        v['cpucount'] = cores
    return [cpu_usage[k] for k in sorted(cpu_usage.keys(), key=natural)]
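# For reference, a hypothetical fragment of the monitor output that
# parse_cpuacct() accepts; the record layout follows the regexes above, but
# the numeric values below are invented purely for illustration.
sample_stats = '''cgroup h1,time 100.00
usage 123456789
user 10
system 2
percpu 61728394 61728395
cgroup h1,time 101.00
usage 223456789
user 20
system 4
percpu 111728394 111728395
'''
# parse_cpuacct(sample_stats, cpulimit=.5) would return one record for h1
# whose cpuvals/uservals/systemvals are per-interval rates computed from
# successive differences of the raw cpuacct counters.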
if __name__ == '__main__':
    setLogLevel( 'info' )
    targetbw = 200
    targetcpu = .09
    run( sched='cfs', cpu=targetcpu, fastbw=targetbw )
    run( sched='cfs', cpu=targetcpu, fastbw=None )
    run( sched='none', cpu=None, fastbw=targetbw )
    run( sched='none', cpu=None, fastbw=None, lanbw=None )
def CPUIsolationTest(opts):
    "Check CPU isolation for various no. of nodes."
    cpustress = 'cpu/cpu-stress'
    cpumonitor = 'cpu/cpumonitor'
    results = []
    initOutput(opts.outfile, opts)
    for n in opts.counts:
        for run in xrange(1, opts.runs + 1):
            # divide target utilization across hosts
            cpu = opts.cpu / n
            print 'Running CPU Test: %d nodes, cpu=%.3f%%, trial no. %d' % (
                n, 100.0 * cpu, run)
            host = custom(CPUIsolationHost, cpu=cpu, sched=opts.sched,
                          period_us=opts.period)
            net = Mininet(topo=CPUIsolationTopo(n), host=host,
                          autoPinCpus=opts.static)
            net.start()
            result = [''] * n
            cmd = [None] * opts.numprocs * n
            # monitor = [None]*n
            # monitor_outfile = [None]*n
            # cpu_log = [None]*n
            # start the cpu-stressers
            for i in xrange(0, n):
                server = net.hosts[i]
                # scmd = '%s %d %d' % (cpustress, opts.time+10, 0)
                # run for ten secs extra
                scmd = cpustress  # run indefinitely!
                for j in range(opts.numprocs):
                    # was: cmd[j*n + i] = server.lxcSendCmd(scmd)
                    # Using shell for now since lxc-attach is broken for RT
                    cmd[j * n + i] = server.sendCmd(scmd)
                # monitor_outfile[i] = '/tmp/%s_cpu.out' % server.name
                # monitor[i] = start_monitor_cpu(server, monitor_outfile[i])
            sleep(1)
            # start the cpu monitor
            startTime = int(time())
            cpumon_length = opts.time
            # Was always one second.
            # Now we want the cpuacct timer to get a 10 ms tick,
            # even with minimum quota of 1 ms
            cpumon_interval = 1.0
            cpumon_min = .011 / (cpu * numCores())
            if cpumon_interval < cpumon_min:
                cpumon_interval = cpumon_min
                print "Adjusting cpumon_interval to %.2f seconds" % (
                    cpumon_interval)
            hosts = ' '.join([h.name for h in net.hosts])
            info('*** Running test and monitoring output\n')
            cmd = ('%s %d %f %s' %
                   (cpumonitor, cpumon_length, cpumon_interval, hosts))
            stats = quietRun(cmd)
            # parse cpu monitor results
            cpu_usage = parse_cpuacct(stats, cpulimit=cpu)
            # fetch the results
            # BL: Ignore this for now to avoid shutdown effects!
            if False:
                for i in xrange(0, n):
                    server = net.hosts[i]
                    for j in range(opts.numprocs):
                        # was: c = cmd[j*n + i].waitOutput()
                        c = server.waitOutput()
                    # stop_monitor_cpu(server)
                    # sleep(0.5)
                    # cpu_log[i] = parse_cpu_log(monitor_outfile[i])
                    # quietRun('rm -rf %s' % monitor_outfile[i])
                    try:
                        result[i] = c.split('\n')[1].replace(',', ':')
                    except:
                        result[i] = 'NaN'
                print ','.join(result)
            else:
                quietRun('pkill -9 ' + cpustress)
            # appendOutput(opts, cpu_log)
            appendOutput(opts.outfile, cpu_usage)
            net.stop()
def rtInfo( self, f ):
    "Internal method: return parameters for RT bandwidth"
    pstr, qstr = 'rt_period_us', 'rt_runtime_us'
    # RT uses wall clock time for period and quota
    quota = int( self.period_us * f * numCores() )
    return pstr, qstr, self.period_us, quota
class Mininet_IoT(Mininet):

    topo = None
    sixLoWPan = sixLoWPan
    APSensor = OVSSensor
    controller = DefaultController
    intf = Intf
    inNamespace = False
    autoSetMacs = False
    autoStaticArp = False
    autoPinCpus = False
    listenPort = None
    waitConnected = False
    autoSetPositions = False
    ipBase = '10.0.0.0/8'
    ip6Base = '2001:0:0:0:0:0:0:0/64'
    ipBaseNum, prefixLen = netParse(ipBase)
    ip6BaseNum, prefixLen6 = netParse6(ip6Base)
    nextIP = 1  # start for address allocation
    nextIP6 = 1  # start for address allocation
    nextPosition = 1
    inNamespace = inNamespace
    numCores = numCores()
    nextCore = 0  # next core for pinning hosts to CPUs
    nameToNode = {}  # name to Node (Host/Switch) objects
    links = []
    apsensors = []
    sensors = []
    terms = []  # list of spawned xterm processes
    nwpans = 0
    connections = {}
    wlinks = []

    @classmethod
    def init_module(cls, iot_module):
        sensors = cls.sensors + cls.apsensors
        module(sensors, cls.nwpans, iot_module=iot_module)
        return cls.sensors, cls.apsensors

    @classmethod
    def pos_to_array(cls, node):
        pos = node.params['position']
        if isinstance(pos, string_types):
            pos = pos.split(',')
        node.position = [float(pos[0]), float(pos[1]), float(pos[2])]
        node.params.pop('position', None)

    @classmethod
    def addParameters(cls, node, **params):
        """adds parameters to wireless nodes
           node: node
           autoSetMacs: set MAC addrs automatically like IP addresses
           params: parameters
           defaults: Default IP and MAC addresses
           node_mode: if interface is running in managed or master mode"""
        node.params['wpan'] = []
        wpans = cls.count6LoWPANIfaces(**params)
        for wpan in range(wpans):
            node.params['wpan'].append(node.name + '-wpan' + str(wpan))
        node.params.pop("wpans", None)

    @staticmethod
    def appendAssociatedTo(node):
        "Add associatedTo param"
        node.params['associatedTo'].append('')

    @classmethod
    def addAPSensor(self, name, cls=None, **params):
        """Add AccessPoint as a Sensor.
           name: name of accesspoint to add
           cls: custom switch class/constructor (optional)
           returns: added accesspoint
           side effect: increments listenPort var ."""
        defaults = {'listenPort': self.listenPort,
                    'inNamespace': self.inNamespace}
        defaults.update(params)
        if self.autoSetPositions:
            defaults['position'] = (round(self.nextPos_ap, 2), 50, 0)
            self.nextPos_ap += 100
        if not cls:
            cls = self.APSensor
        ap = cls(name, **defaults)
        if not self.inNamespace and self.listenPort:
            self.listenPort += 1
        self.nameToNode[name] = ap
        if 'position' in params:
            self.pos_to_array(ap)
        self.addParameters(ap, **defaults)
        self.apsensors.append(ap)
        return ap

    @classmethod
    def addSensor(self, name, cls=None, **params):
        """Add Sensor node.
           name: name of station to add
           cls: custom 6LoWPAN class/constructor (optional)
           params: parameters for 6LoWPAN
           returns: added station"""
        # Default IP and MAC addresses
        nextIP6 = params['nextIP6']
        defaults = {'ip6': ipAdd6(nextIP6,
                                  ipBaseNum=self.ip6BaseNum,
                                  prefixLen=self.prefixLen6) +
                           '/%s' % self.prefixLen6}
        defaults.update(params)
        if self.autoSetPositions:
            defaults['position'] = ('%s,0,0' % self.nextPosition)
        if self.autoSetMacs:
            defaults['mac'] = macColonHex(nextIP6)
        if self.autoPinCpus:
            defaults['cores'] = self.nextCore
            self.nextCore = (self.nextCore + 1) % self.numCores
        self.nextIP6 += 1
        self.nextPosition += 1
        if not cls:
            cls = self.sixLoWPan
        node = cls(name, **defaults)
        self.nameToNode[name] = node
        if 'position' in params:
            self.pos_to_array(node)
        self.addParameters(node, **defaults)
        self.sensors.append(node)
        return node

    # BL: We now have four ways to look up nodes
    # This may (should?) be cleaned up in the future.
    def getNodeByName(self, *args):
        "Return node(s) with given name(s)"
        if len(args) is 1:
            return self.nameToNode[args[0]]
        return [self.nameToNode[n] for n in args]

    def get(self, *args):
        "Convenience alias for getNodeByName"
        return self.getNodeByName(*args)

    # Even more convenient syntax for node lookup and iteration
    def __getitem__(self, key):
        "net[ name ] operator: Return node with given name"
        return self.nameToNode[key]

    def __delitem__(self, key):
        "del net[ name ] operator - delete node with given name"
        self.delNode(self.nameToNode[key])

    def __contains__(self, item):
        "returns True if net contains named node"
        return item in self.nameToNode

    def keys(self):
        "return a list of all node names or net's keys"
        return list(self)

    def values(self):
        "return a list of all nodes or net's values"
        return [self[name] for name in self]

    def items(self):
        "return (key,value) tuple list for every node in net"
        return zip(self.keys(), self.values())

    @staticmethod
    def _parsePing(pingOutput):
        "Parse ping output and return packets sent, received."
        # Check for downed link
        if 'connect: Network is unreachable' in pingOutput:
            return 1, 0
        r = r'(\d+) packets transmitted, (\d+)( packets)? received'
        m = re.search(r, pingOutput)
        if m is None:
            error('*** Error: could not parse ping output: %s\n' %
                  pingOutput)
            return 1, 0
        sent, received = int(m.group(1)), int(m.group(2))
        return sent, received

    @classmethod
    def ping6(self, hosts=None, timeout=None):
        """Ping6 between all specified hosts.
           hosts: list of hosts
           timeout: time to wait for a response, as string
           returns: ploss packet loss percentage"""
        # should we check if running?
        packets = 0
        lost = 0
        ploss = None
        if not hosts:
            hosts = self.sensors
        output('*** Ping: testing ping reachability\n')
        for node in hosts:
            output('%s -> ' % node.name)
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    if dest.intfs:
                        result = node.cmdPrint('ping6 -c1 %s %s'
                                               % (opts, dest.IP()))
                        sent, received = self._parsePing(result)
                    else:
                        sent, received = 0, 0
                    packets += sent
                    if received > sent:
                        error('*** Error: received too many packets')
                        error('%s' % result)
                        node.cmdPrint('route')
                        exit(1)
                    lost += sent - received
                    output(('%s ' % dest.name) if received else 'X ')
            output('\n')
        if packets > 0:
            ploss = 100.0 * lost / packets
            received = packets - lost
            output("*** Results: %i%% dropped (%d/%d received)\n" %
                   (ploss, received, packets))
        else:
            ploss = 0
            output("*** Warning: No packets sent\n")
        return ploss

    @staticmethod
    def _parsePingFull(pingOutput):
        "Parse ping output and return all data."
        errorTuple = (1, 0, 0, 0, 0, 0)
        # Check for downed link
        r = r'[uU]nreachable'
        m = re.search(r, pingOutput)
        if m is not None:
            return errorTuple
        r = r'(\d+) packets transmitted, (\d+)( packets)? received'
        m = re.search(r, pingOutput)
        if m is None:
            error('*** Error: could not parse ping output: %s\n' %
                  pingOutput)
            return errorTuple
        sent, received = int(m.group(1)), int(m.group(2))
        r = r'rtt min/avg/max/mdev = '
        r += r'(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+) ms'
        m = re.search(r, pingOutput)
        if m is None:
            if received is 0:
                return errorTuple
            error('*** Error: could not parse ping output: %s\n' %
                  pingOutput)
            return errorTuple
        rttmin = float(m.group(1))
        rttavg = float(m.group(2))
        rttmax = float(m.group(3))
        rttdev = float(m.group(4))
        return sent, received, rttmin, rttavg, rttmax, rttdev

    def pingFull(self, hosts=None, timeout=None):
        """Ping between all specified hosts and return all data.
           hosts: list of hosts
           timeout: time to wait for a response, as string
           returns: all ping data; see function body."""
        # should we check if running?
        # Each value is a tuple: (src, dest, [all ping outputs])
        all_outputs = []
        if not hosts:
            hosts = self.hosts
        output('*** Ping: testing ping reachability\n')
        for node in hosts:
            output('%s -> ' % node.name)
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd('ping -c1 %s %s' % (opts, dest.IP()))
                    outputs = self._parsePingFull(result)
                    sent, received, rttmin, rttavg, rttmax, rttdev = outputs
                    all_outputs.append((node, dest, outputs))
                    output(('%s ' % dest.name) if received else 'X ')
            output('\n')
        output("*** Results: \n")
        for outputs in all_outputs:
            src, dest, ping_outputs = outputs
            sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs
            output(" %s->%s: %s/%s, " % (src, dest, sent, received))
            output("rtt min/avg/max/mdev %0.3f/%0.3f/%0.3f/%0.3f ms\n" %
                   (rttmin, rttavg, rttmax, rttdev))
        return all_outputs

    def pingAll(self, timeout=None):
        """Ping between all hosts.
           returns: ploss packet loss percentage"""
        return self.ping6(timeout=timeout)

    @staticmethod
    def _parseIperf(iperfOutput):
        """Parse iperf output and return bandwidth.
           iperfOutput: string
           returns: result string"""
        r = r'([\d\.]+ \w+/sec)'
        m = re.findall(r, iperfOutput)
        if m:
            return m[-1]
        else:
            # was: raise Exception(...)
            error('could not parse iperf output: ' + iperfOutput)
            return ''

    def iperf(self, hosts=None, l4Type='TCP', udpBw='10M', fmt=None,
              seconds=5, port=5001):
        """Run iperf between two hosts.
           hosts: list of hosts; if None, uses first and last hosts
           l4Type: string, one of [ TCP, UDP ]
           udpBw: bandwidth target for UDP test
           fmt: iperf format argument if any
           seconds: iperf time to transmit
           port: iperf port
           returns: two-element array of [ server, client ] speeds
           note: send() is buffered, so client rate can be much higher than
           the actual transmission rate; on an unloaded system, server
           rate should be much closer to the actual receive rate"""
        sleep(2)
        nodes = self.sensors
        hosts = hosts or [nodes[0], nodes[-1]]
        assert len(hosts) is 2
        client, server = hosts
        output('*** Iperf: testing', l4Type, 'bandwidth between',
               client, 'and', server, '\n')
        server.cmd('killall -9 iperf')
        iperfArgs = 'iperf -p %d ' % port
        bwArgs = ''
        if l4Type is 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception('Unexpected l4 type: %s' % l4Type)
        if fmt:
            iperfArgs += '-f %s ' % fmt
        server.sendCmd(iperfArgs + '-s')
        if l4Type is 'TCP':
            if not waitListening(client, server.IP(), port):
                raise Exception('Could not connect to iperf on port %d'
                                % port)
        cliout = client.cmd(iperfArgs + '-t %d -c ' % seconds +
                            server.IP() + ' ' + bwArgs)
        debug('Client output: %s\n' % cliout)
        servout = ''
        # We want the last *b/sec from the iperf server output
        # for TCP, there are two of them because of waitListening
        count = 2 if l4Type is 'TCP' else 1
        while len(re.findall('/sec', servout)) < count:
            servout += server.monitor(timeoutms=5000)
        server.sendInt()
        servout += server.waitOutput()
        debug('Server output: %s\n' % servout)
        result = [self._parseIperf(servout), self._parseIperf(cliout)]
        if l4Type is 'UDP':
            result.insert(0, udpBw)
        output('*** Results: %s\n' % result)
        return result

    @classmethod
    def count6LoWPANIfaces(self, **params):
        "Count the number of virtual 6LoWPAN interfaces"
        if 'wpans' in params:
            self.nwpans += int(params['wpans'])
            wpans = int(params['wpans'])
        else:
            wpans = 1
            self.nwpans += 1
        return wpans

    def kill_fakelb(self):
        "Kill fakelb"
        module.fakelb()
        sleep(0.1)

    def configureIface(self, node, wlan):
        intf = module.wlan_list[0]
        module.wlan_list.pop(0)
        node.renameIface(intf, node.params['wpan'][wlan])

    def closeMininetWiFi(self):
        "Close Mininet-WiFi"
        module.stop()
def __init__(self, topo=None, switch=OVSKernelSwitch, host=Host,
             controller=DefaultController, link=Link, intf=Intf,
             build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
             inNamespace=False, autoSetMacs=False, autoStaticArp=False,
             autoPinCpus=False, listenPort=None, waitConnected=False,
             monitor_interval=5, sample_interval=1, tdf_adaptor=None):
    """Create Mininet object.
       topo: Topo (topology) object or None
       switch: default Switch class
       host: default Host class/constructor
       controller: default Controller class/constructor
       link: default Link class/constructor
       intf: default Intf class/constructor
       ipBase: base IP address for hosts,
       build: build now from topo?
       xterms: if build now, spawn xterms?
       cleanup: if build now, cleanup before creating?
       inNamespace: spawn switches and controller in net namespaces?
       autoSetMacs: set MAC addrs automatically like IP addresses?
       autoStaticArp: set all-pairs static MAC addrs?
       autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
       listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
    self.topo = topo
    self.switch = switch
    self.host = host
    self.controller = controller
    self.link = link
    self.intf = intf
    self.ipBase = ipBase
    self.ipBaseNum, self.prefixLen = netParse(self.ipBase)
    self.nextIP = 1  # start for address allocation
    self.inNamespace = inNamespace
    self.xterms = xterms
    self.cleanup = cleanup
    self.autoSetMacs = autoSetMacs
    self.autoStaticArp = autoStaticArp
    self.autoPinCpus = autoPinCpus
    self.numCores = numCores()
    self.nextCore = 0  # next core for pinning hosts to CPUs
    self.listenPort = listenPort
    self.waitConn = waitConnected
    self.hosts = []
    self.switches = []
    self.controllers = []
    self.links = []
    self.nameToNode = {}  # name to Node (Host/Switch) objects
    self.terms = []  # list of spawned xterm processes
    self.monitor_queue = Queue.Queue()
    self.monitor_interval = monitor_interval
    self.sample_interval = sample_interval
    self.tdf_adaptor = tdf_adaptor
    self.record_file = open('record_tdf.log', 'a+')
    self.record_file.write(
        "*****************\nNew Experiment Starts\n*****************\n")
    self.updating_tdf = False
    self.cpu_usage = -1
    self.stop_timer = False
    Mininet.init()  # Initialize Mininet if necessary
    self.built = False
    if topo and build:
        self.build()