def run(m, cfg):
    """Set up and run the Exim mail-server benchmark.

    Requires hotplug because the benchmark binds the daemon to a fixed
    set of online cores.  Raises RuntimeError if cfg.hotplug is False.
    """
    if not cfg.hotplug:
        raise RuntimeError("The Exim benchmark requires hotplug = True. "
                           "Either enable hotplug or disable the Exim "
                           "benchmark in config.py.")

    host = cfg.primaryHost
    m += host
    m += HostInfo(host)

    fs = FileSystem(host, cfg.fs, clean=True)
    m += fs

    eximPath = os.path.join(cfg.benchRoot, "exim")
    m += SetCPUs(host=host, num=cfg.cores)
    # Fixed: the original wrapped these in single-argument
    # os.path.join(...) calls, which are no-ops; the paths are plain
    # string concatenations of the file system root.
    m += EximDaemon(host, eximPath, cfg.eximBuild,
                    fs.path + "0",
                    fs.path + "spool",
                    cfg.eximPort)

    sysmon = SystemMonitor(host)
    m += sysmon
    for trial in range(cfg.trials):
        # XXX It would be a pain to make clients dependent on
        # cfg.cores.
        m += EximLoad(host, trial, eximPath, cfg.cores, cfg.clients,
                      cfg.eximPort, sysmon)
    # m += cfg.monitors
    m.run()
def run(m, cfg):
    """Set up and run the memcached benchmark.

    Starts one memcached instance per core on the primary host and
    drives it from the configured remote load-generator hosts.
    """
    portList = range(BASE_PORT, BASE_PORT + cfg.cores)
    server = cfg.primaryHost
    clients = cfg.getMemcacheClients(server, portList)

    m += server
    m += HostInfo(server)
    for clientHost in set(c.host for c in clients):
        m += clientHost

    if cfg.hotplug:
        m += SetCPUs(host=server, num=cfg.cores)
    # XXX Make configurable (at least iface name)
    m += IXGBE(server, "eth0",
               queues="n%min(NCPU, NRX if rx else NTX)",
               flowDirector="fixed-port-routing")

    # memcached is bound to specific cores, so it may only be started
    # once any core hot-plugging above has taken place.
    daemon = Memcached(server, os.path.join(cfg.benchRoot, "memcached"),
                       "memcached-stock", portList, 1, 400)
    m += daemon

    sysmon = ExplicitSystemMonitor(server)
    m += sysmon
    m += MemcachedLoad(clients, daemon, sysmon)
    m.run()
def run(m, cfg): host = cfg.primaryHost m += host m += HostInfo(host) fs = None if cfg.baseFSPath is "": fs = FileSystem(host, cfg.fs, clean=True) else: fs = FileSystem(host, cfg.fs, basepath=cfg.baseFSPath, clean=True) print "fs value is: " + str(fs) m += fs # XXX. # if cfg.kernelRoot doesn't exit # - download & unarchive # - from https://www.kernel.org/pub/linux/kernel/v3.x/linux-3.18.tar.xz # It's really hard to predict what make will access, so we # prefetch the whole source tree. This, combined with the # pre-build of init/main.o, eliminates virtually all disk # reads. For the rest, we'll just have to rely on multiple # trials or at least multiple configurations to cache. if cfg.noCaching is False: m += PrefetchDir(host, cfg.kernelRoot, ["*/.git"]) if cfg.hotplug: m += SetCPUs(host=host, num=cfg.cores) sysmon = SystemMonitor(host) m += sysmon for trial in range(cfg.trials): m += GmakeLoad(host, trial, cfg.cores, cfg.kernelRoot, fs.path + "0", sysmon, cfg.pstat, cfg.precord, cfg.linuxSrc, m.tasks()[0].getPath(), cfg.perfBin, cfg.noCaching) # m += cfg.monitors m.run()
def run(m, cfg):
    """Set up and run the Apache static-file benchmark.

    Starts a threaded Apache on the primary host and drives it with
    httperf from the configured client hosts.
    """
    server = cfg.primaryHost
    clients = cfg.getApacheClients(cfg)

    m += server
    m += HostInfo(server)
    for client in set(clients):
        m += client

    if cfg.hotplug:
        m += SetCPUs(host=server, num=cfg.cores)
    # XXX Make configurable (at least iface name)
    m += IXGBE(server, "eth0",
               queues="n%min(NCPU, NRX if rx else NTX)",
               flowDirector="spread-incoming")

    # Apache is bound to specific cores, so it may only be started once
    # any core hot-plugging above has taken place.
    threadTotal = cfg.cores * cfg.threadsPerCore
    httpd = Apache(server,
                   os.path.join(cfg.benchRoot, "apache"),
                   "apache-mod", PORT, cfg.fileSize,
                   ListenBacklog=512,
                   KeepAlive=False,
                   ServerLimit=cfg.cores,
                   StartServers=cfg.cores,
                   ThreadLimit=cfg.threadsPerCore,
                   MaxClients=threadTotal,
                   MinSpareThreads=threadTotal,
                   MaxSpareThreads=threadTotal,
                   ThreadsPerChild=cfg.threadsPerCore,
                   MaxRequestsPerChild=0)
    m += httpd
    m += Httperf(clients, cfg.getApacheRate(cfg), httpd,
                 cfg.getApacheFDLim(cfg))

    sysmon = SystemMonitor(server)
    m += sysmon
    m += ApacheMon(httpd, cfg.cores, sysmon)
    m.run()
def run(m, cfg):
    """Set up and run the Postgres benchmark.

    Initializes a database under a subdirectory of the benchmark file
    system and drives it from the configured postgres client host.
    Raises ValueError if cfg.lockPartitions is not a power of 2.
    """
    host = cfg.primaryHost
    loadgen = cfg.postgresClient
    m += host
    m += loadgen
    m += HostInfo(host)

    # Creating the db takes time, so we don't clean the file
    # system.  We avoid any cruft that may be there already by
    # putting the DB in a subdirectory.
    fs = FileSystem(host, cfg.fs, clean = False)
    m += fs
    dbdir = fs.path + "0/postgres"

    pgPath = os.path.join(cfg.benchRoot, "postgres")
    pgBuild = getBuild(cfg)
    pgOpts = {"shared_buffers": postgres.PGVal(cfg.bufferCache, "MB")}
    log2NumLockPartitions = int(math.log(cfg.lockPartitions, 2))
    if cfg.lockPartitions != 1 << log2NumLockPartitions:
        # Fixed: the message previously read cfg.numLockPartitions, an
        # attribute that doesn't exist, so this raise itself would have
        # died with an AttributeError instead of the intended ValueError.
        raise ValueError("lockPartitions must be a power of 2, got %r" %
                         cfg.lockPartitions)
    pgOpts["log2_num_lock_partitions"] = log2NumLockPartitions
    if cfg.sleep == "sysv":
        pgOpts["semas_per_set"] = cfg.semasPerSet

    pg = postgres.Postgres(host, pgPath, pgBuild, dbdir,
                           malloc = cfg.malloc, **pgOpts)
    m += postgres.InitDB(host, pg).addTrust(loadgen)
    m += pg

    if cfg.hotplug:
        # Because the number of cores and the number of clients is
        # the same, we don't strictly need hotplug
        m += SetCPUs(host = host, num = cfg.cores)
    # XXX Make configurable (at least iface name)
    # m += IXGBE(host, "eth0", queues = "n*NCPU/(NRX if rx else NTX)")
    # The ixgbe driver assigns flows to queues sequentially.
    # Since we only have cfg.cores flows, make sure a sequential
    # assignment spans all the online cores.  However, this does
    # not spread things out if we have more queues than cores.
    m += IXGBE(host, "eth0", queues = "n%min(NCPU, NRX if rx else NTX)")

    sysmon = ExplicitSystemMonitor(host)
    m += sysmon
    for trial in range(cfg.trials):
        m += PostgresLoad(loadgen, trial, pg, cfg.cores, cfg.cores,
                          cfg.rows, cfg.partitions, cfg.batchSize,
                          cfg.randomWritePct, sysmon)
    m.run()
def run(m, cfg):
    """Set up and run the kernel-build benchmark inside QEMU guests."""
    host = cfg.primaryHost
    m += host
    m += HostInfo(host)

    scratch = FileSystem(host, cfg.fs, clean=True)
    m += scratch

    if cfg.hotplug:
        m += SetCPUs(host=host, num=cfg.cores)

    monitor = SystemMonitor(host)
    m += monitor

    for t in range(cfg.trials):
        m += GmakeQemuLoad(host, t, cfg.cores, cfg.kernelRoot,
                           scratch.path + "0", monitor, cfg.benchRoot,
                           cfg.coresPerSocket, m.tasks()[0].getPath(),
                           cfg.linuxSrc, cfg.qpin, cfg.lockStats,
                           cfg.multiVM, cfg.precord, cfg.perfBin,
                           cfg.perfKVMRec, cfg.perfGuestRec,
                           cfg.kvmStat, cfg.kvmTrace)
    # m += cfg.monitors
    m.run()
def run(m, cfg):
    """Set up and run the kernel-build (gmake) benchmark."""
    host = cfg.primaryHost
    m += host
    m += HostInfo(host)

    scratch = FileSystem(host, cfg.fs, clean=True)
    m += scratch

    # make's file accesses are hard to predict, so prefetch the entire
    # source tree up front.  Together with the pre-built init/main.o
    # this removes virtually all disk reads; any remainder is absorbed
    # by running multiple trials or configurations.
    m += PrefetchDir(host, cfg.kernelRoot, ["*/.git"])

    if cfg.hotplug:
        m += SetCPUs(host=host, num=cfg.cores)

    monitor = SystemMonitor(host)
    m += monitor

    for t in range(cfg.trials):
        m += GmakeLoad(host, t, cfg.cores, cfg.kernelRoot,
                       scratch.path + "0", monitor)
    # m += cfg.monitors
    m.run()
def run(m, cfg):
    """Set up and run the Metis MapReduce benchmark."""
    # XXX Clean hugetlb directories between trials?
    host = cfg.primaryHost
    m += host
    m += HostInfo(host)

    if cfg.model == "hugetlb":
        # Cleaned explicitly before each trial rather than here.
        hugefs = FileSystem(host, "hugetlb", clean=False)
        m += hugefs
    else:
        hugefs = None

    cpus = SetCPUs(host=host, num=cfg.cores, hotplug=cfg.hotplug,
                   seq=cfg.order)
    m += cpus

    monitor = SystemMonitor(host)
    m += monitor

    metisDir = os.path.join(cfg.benchRoot, "metis")
    for t in range(cfg.trials):
        m += MetisLoad(host, t, cfg.cores, metisDir, cfg.streamflow,
                       cfg.model, cpus, hugefs, monitor)
    # m += cfg.monitors
    m.run()
def run(m, cfg):
    """Set up and run the Psearchy text-indexing benchmark."""
    host = cfg.primaryHost
    m += host
    m += HostInfo(host)

    scratch = FileSystem(host, cfg.fs, clean=True)
    m += scratch

    psearchyDir = os.path.join(cfg.benchRoot, "psearchy")
    corpus = Mkfiles(host, psearchyDir, cfg.textRoot)
    m += corpus
    m += PrefetchList(host, corpus.filesPath, reuse=True)

    cpus = SetCPUs(host=host, num=cfg.cores, hotplug=cfg.hotplug,
                   seq=cfg.order)
    m += cpus

    monitor = SystemMonitor(host)
    m += monitor

    for t in range(cfg.trials):
        m += PsearchyLoad(host, t, psearchyDir, corpus.filesPath,
                          scratch.path, cfg.cores, cfg.mode, cfg.order,
                          cfg.mem, cfg.dblim, cpus, monitor)
    # XXX
    # m += cfg.monitors
    m.run()