def prepare(self, arglist=None):
    # Call the base prepare.
    super(TCIOLatency, self).prepare(arglist)

    # Create the SR.
    sr = self.createSR(default=True)

    # Build the list of extra disks.
    extraDisks = []
    for i in range(1, self.edisks + 1):
        extraDisks.append(self.edisk)

    # Check whether there is already a golden image we can reuse.
    existingGuests = self.host.listGuests()
    if existingGuests and not self.luntype.startswith("localvm"):
        # If so, pick the first one available.
        self.goldenVM = self.host.getGuest(existingGuests[0])
    else:
        # Otherwise, install a golden VM with the specified extra disks.
        xenrt.TEC().progress("Installing VM zero")
        self.goldenVM = xenrt.productLib(host=self.host).guest.createVM(
            host=self.host,
            guestname=self.goldvm,
            distro=self.distro,
            arch=self.arch,
            vifs=xenrt.productLib(host=self.host).Guest.DEFAULT,
            sr=sr.uuid,
            memory=4096,  # 4096MB: do not swap during the test
            disks=extraDisks)
    self.diskprefix = self.goldenVM.vendorInstallDevicePrefix()
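# The extra-disk list above is simply N copies of the same disk spec; a more
# idiomatic equivalent is sketched below. The tuple shown is an assumption
# based on disk specs used elsewhere in this suite, e.g. ("0", sizeGB, False).
def _example_extra_disks(edisk, edisks):
    # e.g. _example_extra_disks(("1", 1, False), 3) -> three identical specs
    return [edisk] * edisks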
def createVPXOnHost(cls, host, vpxName=None, vpxHttpLocation=None):
    """Import a NetScaler VPX onto the specified host"""
    if not vpxName:
        vpxName = xenrt.randomGuestName()
    if not vpxHttpLocation:
        vpxHttpLocation = os.path.join(
            xenrt.TEC().lookup('EXPORT_DISTFILES_HTTP'),
            'tallahassee/NSVPX-XEN-10.0-72.5_nc.xva')
    xenrt.TEC().logverbose('Importing VPX [%s] from: %s to host: %s'
                           % (vpxName, vpxHttpLocation, host.getName()))
    xenrt.productLib(hostname=host.getName()).guest.createVMFromFile(
        host=host, guestname=vpxName, filename=vpxHttpLocation)
    return cls.setupNetScalerVpx(vpxName)
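# A minimal usage sketch, not part of the original suite. Assumptions are
# flagged inline: `NetScalerVPX` is a stand-in for the class that owns
# createVPXOnHost (its real name is not visible in this snippet), and `host`
# is a prepared XenRT host object.
def _example_import_vpx(host):
    # Default name and XVA location:
    vpx = NetScalerVPX.createVPXOnHost(host)  # hypothetical class name
    # Explicit name and location (both values illustrative):
    vpx2 = NetScalerVPX.createVPXOnHost(
        host,
        vpxName="vpx-under-test",
        vpxHttpLocation="http://example/NSVPX-XEN-10.0-72.5_nc.xva")
    return vpx, vpx2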
def run(self, arglist=None):
    # Install 'vm00'.
    xenrt.TEC().progress("Installing VM zero")
    self.vm.append(xenrt.productLib(host=self.host).guest.createVM(
        host=self.host,
        guestname="vm00",
        distro=self.distro,
        arch=self.arch,
        vifs=xenrt.productLib(host=self.host).Guest.DEFAULT,
        disks=[self.edisk]))
    self.vbds += 2
    self.rvms += 1
    self.diskprefix = self.vm[0].vendorInstallDevicePrefix()

    # Copy the 'latency' and 'stats' tools to 'vm00'.
    sftp = self.vm[0].sftpClient()
    sftp.copyTo("/home/xenrtd/felipef/latency", "/root/latency")
    sftp.copyTo("/home/xenrtd/felipef/stats", "/root/stats")

    # Populate the extra disk.
    self.vm[0].execguest("dd if=/dev/zero of=/dev/%sb bs=1M oflag=direct || true"
                         % self.diskprefix)

    # Collect reference metrics (1 VM, 2 VBDs).
    self.collectMetrics()

    # Install more VMs so that we can plug more VBDs into the host.
    for i in range(1, self.vms + 1):
        xenrt.TEC().progress("Installing VM %d" % i)

        # Copy the original VM (much quicker than installing another one).
        self.vm[0].shutdown()
        self.vm.append(self.vm[0].copyVM(name="vm%02d" % i))
        self.vm[i].removeDisk(1)
        self.vm[0].start()
        self.vm[i].start()

        # At this point, we added one VM and plugged only one more VBD into the host.
        self.rvms += 1
        self.vbds += 1

        # Recollect metrics.
        self.collectMetrics()

        # Loop adding more VBDs.
        for j in range(0, self.edisks):
            if not self.vm[i].enlightenedDrivers:
                self.vm[i].shutdown()
            self.vm[i].createDisk(sizebytes=xenrt.GIGA)
            self.vbds += 1
            if not self.vm[i].enlightenedDrivers:
                self.vm[i].start()
            time.sleep(60)
            self.collectMetrics()
def installGuest(self, guests):
    # Install 'vm-worker'.
    if not self.isNameinGuests(guests, "vm-worker"):
        xenrt.TEC().progress("Installing VM worker")
        postinstall = [] if self.postinstall is None else self.postinstall.split(",")
        sr = self.createSR()
        if self.vm_image is None:
            self.guest = xenrt.productLib(host=self.host).guest.createVM(
                host=self.host,
                guestname="vm-worker",
                vcpus=self.vcpus,
                memory=self.vm_ram,
                distro=self.distro,
                arch=self.arch,
                sr=sr,
                disks=[("0", self.rootDiskSizeGB, False)],
                postinstall=postinstall,
                vifs=self.host.guestFactory().DEFAULT)
        else:
            disturl = xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP", "")
            vmurl = "%s/performance/base/%s" % (disturl, self.vm_image)
            xenrt.TEC().logverbose("Getting vm from %s" % (vmurl))
            self.guest = xenrt.productLib(host=self.host).guest.createVMFromFile(
                host=self.host,
                guestname=self.vm_image,
                filename=vmurl,
                sr=sr)
            if self.vcpus:
                self.guest.cpuset(self.vcpus)
            if self.vm_ram:
                self.guest.memset(self.vm_ram)
            self.guest.removeCD()
            self.startGuest(self.guest)
            if self.rootDiskSizeGB > 15:
                self.resizeRootPartition(self.guest, self.rootDiskSizeGB)
    else:
        for vm in guests:
            if vm.getName() == "vm-worker":
                self.guest = vm
    self.installPhoronix(self.guest)
def isEnabled(self, host):
    try:
        # XenRT knows about existing shares, so we don't need to worry
        # about duplicates.
        share = xenrt.VMSMBShare(hostIndex=1)
        sr = xenrt.productLib(host=host).SMBStorageRepository(host, "CIFS-SR")
        sr.create(share)
        return True
    except Exception:
        return False
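# A minimal sketch of how such a probe might gate a test run. Assumptions:
# `self` is a testcase object with a `host` attribute, and returning early is
# an illustrative skip mechanism, not the framework's actual skip API.
def _example_gate_on_smb(self):
    if not self.isEnabled(self.host):
        xenrt.TEC().logverbose("SMB SR not available on %s; skipping CIFS steps"
                               % self.host.getName())
        return
    # ... proceed with CIFS-dependent steps ...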
def createVM(self, host, name):
    # Use the `host` parameter consistently, including for the VIF defaults.
    return xenrt.productLib(host=host).guest.createVM(
        host=host,
        guestname=name,
        distro=self.distro,
        arch=self.arch,
        memory=self.vmram,
        vcpus=self.vcpus,
        vifs=host.guestFactory().DEFAULT,
        disks=[],
        postinstall=self.postinstall)
def installGuest(self, guests):
    # Install 'vm-worker'.
    if not self.isNameinGuests(guests, "vm-worker"):
        xenrt.TEC().progress("Installing VM worker")
        postinstall = [] if self.postinstall is None else self.postinstall.split(",")
        self.guest = xenrt.productLib(host=self.host).guest.createVM(
            host=self.host,
            guestname="vm-worker",
            vcpus=self.vcpus,
            memory=self.vm_ram,
            distro=self.distro,
            arch=self.arch,
            postinstall=postinstall,
            vifs=self.host.guestFactory().DEFAULT)
    else:
        for vm in guests:
            if vm.getName() == "vm-worker":
                self.guest = vm
    self.guest.installKernBench()
def run(self, arglist=None):
    sftp = self.host.sftpClient()

    # From http://downloads.sourceforge.net/project/lmbench/development/lmbench-3.0-a9/lmbench-3.0-a9.tgz
    lmbenchFilename = "lmbench.tgz"
    lmbenchSrc = "%s/%s" % (self.instFiledir, lmbenchFilename)
    lmbenchDest = "/root/%s" % lmbenchFilename

    # Install lmbench.
    sftp.copyTo(lmbenchSrc, lmbenchDest)
    output = self.host.execdom0("tar xvfz /root/lmbench.tgz")
    xenrt.TEC().logverbose("output: %s" % output)
    output = self.host.execdom0("tar xvfz /root/lmbench/lmbench-3.0-a9.tgz")
    xenrt.TEC().logverbose("output: %s" % output)
    instDir = "/root/lmbench-3.0-a9"

    # Install make and gcc.
    extraargs = ""
    if xenrt.productLib(host=self.host) == xenrt.lib.xenserver:
        extraargs = "--disablerepo=citrix --enablerepo=base,updates"
    cmds = [
        "yum %s install -y make" % extraargs,
        "yum %s install -y gcc" % extraargs,
    ]
    for cmd in cmds:
        output = self.host.execdom0(cmd)
        xenrt.TEC().logverbose("output: %s" % output)

    # Run the script which tells us what OS name lmbench will use -- normally
    # "i686-pc-linux-gnu" in a 32-bit dom0. (Note: the output from this script
    # differs depending on pwd! Run it from within the scripts directory.)
    osName = self.host.execdom0("cd %s/scripts && ./os" % instDir).strip()
    xenrt.TEC().logverbose("lmbench calls the dom0 OS '%s'" % osName)

    # Install the config file.
    configDestDir = "%s/bin/%s" % (instDir, osName)
    configDest = "%s/CONFIG.%s" % (configDestDir, self.host.getName())
    self.host.execdom0("mkdir -p %s" % configDestDir)
    self.host.execdom0("cp /root/lmbench/lmbench.config %s" % configDest)

    # Install the info file. ("INFO.q9" because this is what is specified in
    # the CONFIG file.)
    self.host.execdom0("cp %s/scripts/info-template %s/INFO.q9"
                       % (instDir, configDestDir))

    substitutions = [
        "s/^MB=[0-9]*$/MB=%d/" % self.mb,         # Tweak the 'MB' parameter
        "s/^OS=\"[^\"]*\"$/OS=\"%s\"/" % osName,  # Tweak the 'OS' parameter
    ]

    # Perform the substitutions on the config file.
    for sub in substitutions:
        self.host.execdom0("sed -i '%s' %s" % (sub, configDest))

    # Run the benchmark (repeatedly). Use "rerun" rather than "results" so it
    # picks up the existing CONFIG file.
    cmds = ["make rerun"] * self.runs
    for cmd in cmds:
        output = self.host.execdom0("cd %s && %s" % (instDir, cmd), timeout=7200)
        xenrt.TEC().logverbose("output: %s" % output)

    # Get the output.
    remoteOutdir = "%s/results/%s" % (instDir, osName)
    self.host.execdom0("cd %s; for file in *; do newname=`echo $file | sed 's/^.*\./iter-/'`; mkdir $newname; ln -s ../$file $newname/results.log; done" % remoteOutdir)
    xenrt.TEC().logverbose("copying outdir %s to logs" % remoteOutdir)
    sftp.copyTreeFromRecurse(remoteOutdir, xenrt.TEC().getLogdir())
    xenrt.TEC().logverbose("copied outdir to logs")
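# What the two sed substitutions above do, shown locally in pure Python for
# clarity. The sample CONFIG lines are assumed examples of lmbench's format;
# this helper is illustrative and not part of the test.
def _example_tweak_config(text, mb, os_name):
    import re
    text = re.sub(r"^MB=[0-9]*$", "MB=%d" % mb, text, flags=re.M)
    text = re.sub(r'^OS="[^"]*"$', 'OS="%s"' % os_name, text, flags=re.M)
    return text

# _example_tweak_config('MB=256\nOS="x86_64-linux-gnu"\n', 512, "i686-pc-linux-gnu")
# -> 'MB=512\nOS="i686-pc-linux-gnu"\n'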
def installTemplate(self, guests):
    # Install 'vm-template'.
    if not self.isNameinGuests(guests, "vm-template"):
        xenrt.TEC().progress("Installing VM template")
        postinstall = [] if self.postinstall is None else self.postinstall.split(",")
        if self.vm_image:
            disturl = xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP", "")
            vmurl = "%s/performance/base/%s" % (disturl, self.vm_image)
            xenrt.TEC().logverbose("Getting vm from %s" % (vmurl))
            self.template = xenrt.productLib(host=self.host).guest.createVMFromFile(
                host=self.host,
                guestname=self.vm_image,
                filename=vmurl)
            if self.vcpus_per_vm:
                self.template.cpuset(self.vcpus_per_vm)
            if self.vm_ram:
                self.template.memset(self.vm_ram)
            self.template.removeCD()
            self.template.start()
        else:
            self.template = xenrt.productLib(host=self.host).guest.createVM(
                host=self.host,
                guestname="vm-template",
                vcpus=self.vcpus_per_vm,
                memory=self.vm_ram,
                distro=self.distro,
                arch=self.arch,
                postinstall=postinstall,
                vifs=self.host.guestFactory().DEFAULT)
        if self.template.windows:
            if not isinstance(self.template, xenrt.lib.esx.Guest):
                self.template.installDrivers(extrareboot=True)
            # Use pvsoptimize to reduce background tasks and IO.
            urlperf = xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP", "")
            pvsexe = "TargetOSOptimizer.exe"
            pvsurl = "%s/performance/support-files/%s" % (urlperf, pvsexe)
            xenrt.TEC().logverbose("Getting pvsfile from %s" % (pvsurl))
            pvsfile = xenrt.TEC().getFile(pvsurl, pvsurl)
            cpath = "c:\\%s" % pvsexe
            self.template.xmlrpcSendFile(pvsfile, cpath)
            self.template.xmlrpcExec("%s /s" % cpath)
            if self.bench == "fio":
                self.template.installFioWin()
            else:
                self.template.installIOMeter()
            # Reboot once more to ensure everything is quiescent.
            self.template.reboot()
        else:
            if isinstance(self.template, xenrt.lib.esx.Guest):
                self.template.installTools()
            if self.bench == "fio":
                self.installFioOnLinuxGuest()
            else:
                self.template.installLatency()
            libsynexec.initialise_slave(self.template)
        if (self.distro.startswith("rhel")
                or self.distro.startswith("centos")
                or self.distro.startswith("oel")):
            # When we clone this guest, we don't want it to remember its MAC address.
            self.template.execguest("sed -i /HWADDR/d /etc/sysconfig/network-scripts/ifcfg-eth0")

        # Shut the VM down for cloning.
        self.shutdown_vm(self.template)
    else:
        for vm in guests:
            if vm.getName() == "vm-template":
                self.template = vm
    self.windows = self.template.windows
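# A minimal sketch of what typically follows installTemplate(): the template,
# now shut down, is cloned into worker VMs. `copyVM` appears elsewhere in this
# suite; the count and naming scheme here are illustrative assumptions.
def _example_clone_workers(self, count):
    clones = [self.template.copyVM(name="vm%02d" % i) for i in range(count)]
    for clone in clones:
        clone.start()
    return clones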