def _verifyTpmQuote(self, quoteParser, aik, nonce):
    if not quoteParser.verifyQuote(aik, nonce):
        raise xenrt.XRTFailure("Quote value: %s, nonce: %s, AIK: %s was not valid"
                               % (quoteParser.getCurrentQuoteValue(), nonce, aik))
def licenseGraceTest(self, license):
    # Assign the license and verify it.
    licenseinUse = self.licenseManager.addLicensesToServer(self.v6, license)
    self.licenseManager.applyLicense(self.v6, self.systemObj, license, licenseinUse)

    # Force the hosts onto grace licenses.
    self.disableLicenseServer()

    # Check whether the hosts obtained grace licenses.
    for host in self.systemObj.getHosts():
        if not self.checkGraceLicense(host):
            self.enableLicenseServer()
            self.licenseManager.releaseLicense(self.systemObj)
            raise xenrt.XRTFailure("The host %s failed to acquire a grace license" % host)
        else:
            self.featureFlagValidation(license)

    # Force the hosts to regain their original licenses.
    self.enableLicenseServer()

    # Check whether the hosts regained the original licenses.
    self.systemObj.checkLicenseState(edition=license.getEdition())
    self.licenseManager.verifyLicenseServer(license, self.v6, licenseinUse, self.systemObj)

    # Again force the hosts onto grace licenses.
    self.disableLicenseServer()

    # Check whether the hosts obtained grace licenses.
    for host in self.hosts:
        if not self.checkGraceLicense(host):
            self.enableLicenseServer()
            self.licenseManager.releaseLicense(self.systemObj)
            raise xenrt.XRTFailure("The host %s failed to acquire a grace license again" % host)
        else:
            self.featureFlagValidation(license)

    # Now expire one of the host licenses so that it passes beyond the grace period.
    host = self.expireLicense()  # to expire both hosts, provide allhosts=True

    # Check whether the license has expired.
    if not self.checkLicenseExpired(host):  # for all hosts just provide the param edition.
        self.enableLicenseServer()
        self.licenseManager.releaseLicense(self.systemObj)
        raise xenrt.XRTFailure("License did not expire properly.")
    else:
        self.featureFlagValidation()

    # Check whether the hosts' licenses expired.
    self.systemObj.checkLicenseState(edition=license.getEdition())

    # Cleaning up.
    self.enableLicenseServer()
    self.licenseManager.releaseLicense(self.systemObj)
    self.featureFlagValidation()
    self.cleanUpFistPoint(host)  # if any.
def run(self, arglist=[]):
    extraDisks = (self.NO_OF_VDIS / self.NO_OF_VMS) - 1
    xenrt.TEC().logverbose("Performance testing lvmohba SR on a pool of %d hosts "
                           "with %d guests, attached with %d extra disks" %
                           (self.NO_OF_HOSTS, self.NO_OF_VMS, extraDisks))

    if (len(self.hosts) != self.NO_OF_HOSTS):
        raise xenrt.XRTFailure("The requisite of the test demands %d hosts in a pool." %
                               (self.NO_OF_HOSTS))

    xenrt.TEC().logverbose("Creating a pool of %d hosts." % (self.NO_OF_HOSTS))

    # 1. Create the pool of servers.
    self.pool = xenrt.lib.xenserver.poolFactory(self.hosts[0].productVersion)(self.hosts[0])
    self.pool.master = self.hosts[0]

    # Add all remaining hosts to the pool.
    for host in self.hosts[1:]:
        # The host joining the pool cannot contain any shared storage.
        for sr in host.minimalList("sr-list", args="content-type=iso type=iso"):
            host.forgetSR(sr)
        self.pool.addHost(host)
    self.pool.setPoolParam("name-label", "lvmoHBAPool")
    self.pool.check()

    xenrt.TEC().logverbose("Creating %d lvmoHBA SRs on the pool of %d hosts." %
                           (self.NO_OF_VDIS, self.NO_OF_HOSTS))

    # Find the SCSI IDs by listing /dev/disk/by-scsid.
    scsiIDs = self.hosts[0].execdom0("ls /dev/disk/by-scsid").strip().split("\n")
    scsiIDs = [x for x in scsiIDs if x.startswith("360a98000")]  # only the NetApp LUNs created for the test.
    scsiIDs.sort()  # sort before whole-list comparison.
    xenrt.TEC().logverbose("Found %d SCSI IDs on the master %s: %s" %
                           (len(scsiIDs), self.hosts[0], scsiIDs))

    if (self.NO_OF_VDIS != len(scsiIDs)):
        raise xenrt.XRTFailure("We have created %d LUNs on the filer. Reported only %d SCSI IDs." %
                               (self.NO_OF_VDIS, len(scsiIDs)))

    lvmohbaSRuuid = []  # list of lvmoHBA SR uuids
    lvmohbaSRObject = []
    counter = 0

    # 2. Create lvmoHBA SRs on the master.
    timeNow = xenrt.util.timenow()
    for scsid in scsiIDs:
        fcName = ("lvmoHBASR%d" % counter)
        fcSR = xenrt.lib.xenserver.FCStorageRepository(self.hosts[0], fcName)
        lvmohbaSRObject.append(fcSR)
        fcSR.create(scsid)
        lvmohbaSRuuid.append(fcSR.uuid)
        counter = counter + 1
    xenrt.TEC().logverbose("Time taken to create %d lvmoHBA SRs on master %s is %s seconds." %
                           (self.NO_OF_VDIS, self.hosts[0], (xenrt.util.timenow() - timeNow)))

    if (self.NO_OF_VDIS != len(lvmohbaSRuuid)):
        raise xenrt.XRTFailure("We have created %d LUNs on the filer. Reported only %d lvmohbaSRuuid." %
                               (self.NO_OF_VDIS, len(lvmohbaSRuuid)))

    xenrt.TEC().logverbose("Scanning all the lvmoHBA SRs on the pool of %d hosts." % (self.NO_OF_HOSTS))

    # 3. Time taken to scan the lvmoHBA SRs.
    timeNow = xenrt.util.timenow()
    for sr in lvmohbaSRObject:
        sr.scan()
    xenrt.TEC().logverbose("Time taken to scan %d lvmoHBA SRs on the pool with %d LUNs mapped: %s seconds." %
                           (self.NO_OF_VDIS, self.NO_OF_VDIS, (xenrt.util.timenow() - timeNow)))

    # 4. Time taken to reboot all hosts in the pool after creating the SRs.
    rebootTag = "After creating lvmoHBA SR Reboot"
    self.poolReboot(rebootTag)

    # 5. Create and install a number of guests in parallel using the XenRT pfarm.
    self.vmInstall(lvmohbaSRuuid)

    # 6. Now attach extra disks to each VM from the remaining LUNs.
    self.attachExtraDisks(lvmohbaSRuuid)

    # 7. Install the IOZone test tool on each guest and run it in parallel using the XenRT pfarm.
    self.startIOZoneParallely()

    # 8. Time taken to shut down all the guests serially with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmShutdown(vmTag)

    # 9. Time taken to start all the guests serially with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmStart(vmTag)

    # 10. Reboot all hosts in the pool with all guests installed and each guest attached with extra disks.
    rebootTag = "After installing the guests Reboot"
    self.poolReboot(rebootTag)

    # 11. Start the guests again.
    vmTag = "after pool reboot"
    self.vmStart(vmTag)

    # 12. Time taken to shut down the guests with extra disks attached.
    vmTag = "before un-installation"
    self.vmShutdown(vmTag)

    # 13. Time taken to uninstall the guests with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmUninstall(vmTag)

    xenrt.TEC().logverbose("Destroying the lvmoHBA SRs on the pool of %d hosts." % (self.NO_OF_HOSTS))

    # 14. Destroy the lvmoHBA SRs on the pool.
    timeNow = xenrt.util.timenow()
    for sruuid in lvmohbaSRuuid:
        self.hosts[0].destroySR(sruuid)
    xenrt.TEC().logverbose("Time taken to destroy the lvmoHBA SRs on the %d-host pool: %s seconds." %
                           (self.NO_OF_HOSTS, (xenrt.util.timenow() - timeNow)))
    while True:
        try:
            DTMResult = self.targetGuest.xmlrpcFileExists("c:\\DTMServiceResult.txt")
        except Exception, e:
            xenrt.TEC().warning("Exception checking for DTMServiceResult text file")
            xenrt.sleep(300)
            break
        if DTMResult:
            xenrt.TEC().logverbose("DTMServiceResult text file found")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMServiceResult.txt")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMResScript.js")
            break
        if xenrt.util.timenow() > self.timeOut:
            raise xenrt.XRTFailure("Timed out waiting for DTM installation to complete")
        xenrt.sleep(60)

def installDotNet(self, arglist=None):
    if self.DotNetVersion == "3.5":
        self.targetGuest.installDotNet35()
    elif self.DotNetVersion == "4":
        self.targetGuest.installDotNet4()

def changeHostName(self, arglist=None):
    self.targetGuest.xmlrpcExec(
        "wmic computersystem where name=\"%%COMPUTERNAME%%\" call rename name=\"%s\""
        % (self.winHostName))
    self.targetGuest.reboot()

def installPVDrivers(self, arglist=None):
def SVVPDTMClientInstall(self, arglist=None):
    self.targetGuest.xmlrpcExec(
        "\\\\%s\\DtmInstall\\Client\\Setup.exe /qb ICFAGREE=Yes" % (self.DTMServerName))
    DTMClientInstall = r"""
Delay(20000);
var expString = "No tasks are running";
var WshShellObj = new ActiveXObject("WScript.Shell");
for (var count = 0; count < 1100; count++) {
    var WshShellExecObj2 = WshShellObj.Exec("tasklist /FI \"imagename eq Setup.exe*\"");
    var kitSetUpStats = WshShellExecObj2.StdOut.ReadAll();
    var n = kitSetUpStats.indexOf(expString);
    if (n != -1) {
        Delay(20000);
        var fs = new ActiveXObject('Scripting.FileSystemObject');
        var WshShellObj = new ActiveXObject("WScript.Shell");
        var WshShellExecObj = WshShellObj.Exec("tasklist /FI \"imagename eq WLKSvc.exe*\"");
        var kitSetUpStats = WshShellExecObj.StdOut.ReadAll();
        var n = kitSetUpStats.indexOf(expString);
        if (n == -1) {
            var fs = new ActiveXObject('Scripting.FileSystemObject');
            var fname1 = fs.CreateTextFile("c:\\DTMClientInstalled.txt", true);
        }
        break;
    }
    Delay(5000);
}
function Delay(milliseconds) {
    var start = new Date().getTime();
    for (var i = 0; i < 1e7; i++) {
        if ((new Date().getTime() - start) > milliseconds) {
            break;
        }
    }
}
"""
    self.targetGuest.xmlrpcWriteFile("c:\\DTMClientInstall.js", DTMClientInstall)
    self.targetGuest.xmlrpcStart("c:\\DTMClientInstall.js")
    self.timeOut = xenrt.util.timenow() + 150
    while True:
        try:
            DTMClientResult = self.targetGuest.xmlrpcFileExists("c:\\DTMClientInstalled.txt")
        except Exception, e:
            xenrt.TEC().warning("Exception checking for DTMClientInstalled text file")
            xenrt.sleep(300)
            break
        if DTMClientResult:
            xenrt.TEC().logverbose("DTMClientInstalled text file found")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMClientInstalled.txt")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMClientInstall.js")
            break
        if xenrt.util.timenow() > self.timeOut:
            raise xenrt.XRTFailure("Timed out waiting for DTM client installation to complete")
        xenrt.sleep(60)
def run(self, arglist=None):
    data = self.guest.paramGet("allowed-operations").strip()
    allowedOps = data.split("; ")
    if "hard_reboot" in allowedOps:
        raise xenrt.XRTFailure("CA-30367 Suspended VM has hard_reboot as "
                               "an allowed operation")
def run(self, arglist):
    try:
        TCXSA24.run(self, arglist)
    except xenrt.XRTFailure, e:
        if "Xen will only load images" in str(e.data):
            xenrt.TEC().logverbose("Error message while starting a VM with an invalid kernel: %s"
                                   % str(e.data))
        else:
            raise xenrt.XRTFailure("Unexpected error message while starting a VM with an invalid kernel",
                                   data=str(e.data))
    else:
        raise xenrt.XRTFailure("Succeeded in starting a VM with an invalid kernel")

class TCXSA87(TCXSA29):
    """Test to verify XSA-87"""
    # Jira TC-23743
    VULN = 87

    def run(self, arglist=None):
        self.host.execdom0("xl create /root/minios.cfg")
        self.checkHost()
        if not self.host.guestconsolelogs:
            raise xenrt.XRTFailure("No guest console logs")
def verifySystemVMsRecoverFromOutage(self):
    systemVms = self.cloud.marvin.cloudApi.listSystemVms(podid=self.podid)
    cpvms = filter(lambda x: x.systemvmtype == 'consoleproxy', systemVms)
    ssvms = filter(lambda x: x.systemvmtype == 'secondarystoragevm', systemVms)
    xenrt.xrtAssert(len(cpvms) + len(ssvms) == len(systemVms),
                    'Unexpected System VM type reported')
    (ignore, originalCapacity) = self.diffZoneCapacityWithCurrent(zoneid=self.zoneid)

    # Stop the storage VM (simulate the outage)
    self.storageVM.shutdown(force=True)
    # Wait for the system VMs to exit the Up state
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=600, exitState=True)
    self.storageVM.start()
    # Wait for the system VMs to recover
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=1200)

    # Check the number of system VMs has not changed
    newsystemVms = self.cloud.marvin.cloudApi.listSystemVms(podid=self.podid)
    newcpvms = filter(lambda x: x.systemvmtype == 'consoleproxy', newsystemVms)
    newssvms = filter(lambda x: x.systemvmtype == 'secondarystoragevm', newsystemVms)
    xenrt.xrtAssert(len(newcpvms) + len(newssvms) == len(newsystemVms),
                    'Unexpected System VM type reported')
    xenrt.xrtAssert(len(cpvms) == len(newcpvms),
                    'Number of Console Proxy VMs not the same after outage')
    xenrt.xrtAssert(len(ssvms) == len(newssvms),
                    'Number of Secondary Storage VMs not the same after outage')

    # Check all Hosts are Up and then recheck that all System VMs are still up
    self.waitForHostState(self.podid, state='Up', timeout=600)
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=300)

    if self.VERIFY_USER_INSTANCES:
        self.waitForUserInstanceState(
            instanceNames=map(lambda x: x.name, self.instances),
            state='Running', timeout=300)
        for instance in self.instances:
            try:
                instance.assertHealthy()
            except Exception, e:
                # VMs may fail when their disks are removed - reboot any VMs that are not responding
                xenrt.TEC().logverbose('Instance: %s health check failed with: %s'
                                       % (instance.name, str(e)))
                xenrt.TEC().logverbose('Reboot instance: %s' % (instance.name))
                instance.reboot()
        self.waitForUserInstanceState(
            instanceNames=map(lambda x: x.name, self.instances),
            state='Running', timeout=300)
        map(lambda x: x.assertHealthy(), self.instances)

    # Verify that all VRs are running
    nonRunningVRs = filter(lambda x: x.state != 'Running',
                           self.cloud.marvin.cloudApi.listRouters(listall='true'))
    if len(nonRunningVRs) > 0:
        xenrt.TEC().logverbose('VRs not in Running state: %s'
                               % (pformat(map(lambda x: (x.name, x.state), nonRunningVRs))))
        raise xenrt.XRTFailure('VR(s) not recovered after Primary Storage Outage')
class TCMigrate(xenrt.TestCase):

    WORKLOADS = ["w_find",
                 "w_memtest",
                 #"w_spamcons",
                 "w_forktest2"]

    WINDOWS_WORKLOADS = ["Prime95",
                         "Ping",
                         "SQLIOSim",
                         "Burnintest",
                         "NetperfTX",
                         "NetperfRX",
                         "Memtest"]

    def __init__(self):
        xenrt.TestCase.__init__(self, "TCMigrate")
        self.workloads = None
        self.guest = None
        self.semclass = "TCMigrate"
        self.usedclone = False

    def run(self, arglist=None):
        loops = 50
        live = "false"
        reboot = False
        target = None
        fast = False
        workloads = None
        gname = None
        clonevm = False
        iterreboot = False

        # Mandatory args
        for arg in arglist:
            l = string.split(arg, "=", 1)
            if l[0] == "guest":
                gname = l[1]
            if l[0] == "loops":
                loops = int(l[1])
            elif l[0] == "live":
                live = "true"
            elif l[0] == "reboot":
                reboot = True
            elif l[0] == "iterreboot":
                iterreboot = True
            elif l[0] == "to":
                if l[1] != "localhost":
                    target = l[1]
            elif l[0] == "fast":
                fast = True
            elif l[0] == "workloads":
                if len(l) > 1:
                    workloads = l[1].split(",")
                else:
                    workloads = self.WINDOWS_WORKLOADS
            elif l[0] == "config":
                matching = xenrt.TEC().registry.guestLookup(
                    **xenrt.util.parseXMLConfigString(l[1]))
                for n in matching:
                    xenrt.TEC().comment("Found matching guest(s): %s" % (matching))
                if matching:
                    gname = matching[0]
            elif l[0] == "clone":
                clonevm = True

        if not gname:
            raise xenrt.XRTError("No guest name specified")
        g = self.getGuest(gname)
        self.guest = g
        if g.distro and g.distro in string.split(
                xenrt.TEC().lookup("SKIP_MIGRATE_DISTROS", ""), ","):
            xenrt.TEC().skip("Skipping migrate on %s" % (g.distro))
            return
        self.getLogsFrom(g.host)

        if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) or clonevm:
            xenrt.TEC().comment("Using clone to run test.")
            self.blocker = False
            if g.getState() != "UP":
                g.start()
            g.preCloneTailor()
            g.shutdown()
            clone = g.cloneVM()
            self.guest = clone
            g = clone
            self.usedclone = True
            self.getLogsFrom(g)

        if target:
            thost = xenrt.TEC().registry.hostGet(target)
            if not thost:
                raise xenrt.XRTError("Cannot find host %s in registry" % (target))
            self.getLogsFrom(thost)
            hostlist = [thost, g.host]
            xenrt.TEC().comment("Migrating to %s" % (thost.getName()))
        else:
            hostlist = [g.host]
            xenrt.TEC().comment("Performing localhost migrate")

        if live == "true":
            xenrt.TEC().progress("Running %d iterations of live migrate "
                                 "using %s." % (loops, gname))
        else:
            xenrt.TEC().progress("Running %d iterations of migrate using %s."
                                 % (loops, gname))
        if fast:
            xenrt.TEC().comment("Using back to back migrations")

        try:
            if g.getState() == "DOWN":
                xenrt.TEC().comment("Starting guest %s before commencing "
                                    "migrate." % (g.name))
                g.start()
            # Make sure the guest is healthy before we start
            if not g.windows:
                g.waitForSSH(60, desc="Guest check")
            else:
                g.waitForDaemon(60, desc="Guest check")
            # Make sure there is sufficient memory on the first target
            freemem = hostlist[0].getFreeMemory()
            if freemem < g.memory:
                if xenrt.TEC().lookup("MIGRATE_NOMEM_SKIP", False, boolean=True):
                    xenrt.TEC().skip("Skipping because of insufficient free "
                                     "memory on %s (%u < %u)"
                                     % (hostlist[0].getName(), freemem, g.memory))
                    return
                else:
                    raise xenrt.XRTError("Insufficient free memory on %s (%u < %u)"
                                         % (hostlist[0].getName(), freemem, g.memory))
            # Start workloads on the guest
            if workloads:
                if g.windows:
                    self.workloads = g.startWorkloads(workloads)
                else:
                    self.workloads = g.startWorkloads(self.WORKLOADS)
        except Exception, e:
            traceback.print_exc(file=sys.stderr)
            raise xenrt.XRTError("Guest broken before we started (%s)" % (str(e)))

        success = 0
        mt = xenrt.util.Timer()
        try:
            for i in range(loops):
                if xenrt.GEC().abort:
                    xenrt.TEC().warning("Aborting on command")
                    break
                h = hostlist[i % len(hostlist)]
                xenrt.TEC().logverbose("Starting loop iteration %u (to %s)..."
                                       % (i, h.getName()))
                if not fast:
                    domid = g.getDomid()
                    skew1 = g.getClockSkew()
                g.migrateVM(h, live=live, fast=fast, timer=mt)
                if not fast:
                    skew2 = g.getClockSkew()
                    time.sleep(10)
                    g.check()
                    if not target:
                        # On localhost make sure we did something
                        if g.getDomid() == domid:
                            raise xenrt.XRTError("Domain ID unchanged after migrate.")
                    if skew1 != None and skew2 != None:
                        delta = abs(skew2 - skew1)
                        note = "Before the migrate the skew from controller " \
                               "time was %fs and afterwards it was %fs" % \
                               (skew1, skew2)
                        xenrt.TEC().logverbose(note)
                        if delta > 2000000.0:
                            raise xenrt.XRTFailure("Clock skew detected after migrate", note)
                        else:
                            # Check skew now, in these general tests we'll
                            # allow a slight delay for the clock to fix
                            # itself up
                            time.sleep(5)
                            skew3 = g.getClockSkew()
                            delta = abs(skew3 - skew1)
                            if delta > 3.0:
                                note = "Before the suspend the skew from " \
                                       "controller time was %fs and " \
                                       "afterwards it was %fs, a short " \
                                       "while later it was %fs" % \
                                       (skew1, skew2, skew3)
                                xenrt.TEC().warning("Clock skew detected "
                                                    "after suspend/resume: " + note)
                success = success + 1
                if iterreboot:
                    g.reboot()
                    if workloads:
                        if g.windows:
                            self.workloads = g.startWorkloads(workloads)
                        else:
                            self.workloads = g.startWorkloads(self.WORKLOADS)
        finally:
            xenrt.TEC().comment("%u/%u iterations successful." % (success, loops))
            if mt.count() > 0:
                xenrt.TEC().logverbose("Migrate times: %s" % (mt.measurements))
                xenrt.TEC().value("MIGRATE_MAX", mt.max())
                xenrt.TEC().value("MIGRATE_MIN", mt.min())
                xenrt.TEC().value("MIGRATE_AVG", mt.mean())
                xenrt.TEC().value("MIGRATE_DEV", mt.stddev())

        if fast:
            time.sleep(10)
            g.check()
        if workloads:
            g.stopWorkloads(self.workloads)
        try:
            if reboot:
                g.reboot()
        except xenrt.XRTFailure, e:
            raise xenrt.XRTError(e.reason)
            allHostsReachedState = True
            break
        if len(hostsNotInState) > 0:
            xenrt.TEC().logverbose('Waiting for the following Hosts to reach state %s: %s'
                                   % (state, pformat(map(lambda x: (x.name, x.state), hostsNotInState))))
        self.logCloudHostInfo()
        xenrt.sleep(pollPeriod)
    self.logCloudHostInfo()
    if not allHostsReachedState:
        raise xenrt.XRTFailure('Not all Hosts reached state %s in %d seconds' % (state, timeout))

def waitForUserInstanceState(self, instanceNames, state, timeout=300, pollPeriod=20):
    """Wait for all User Instances (specified) to reach the specified state"""
    xenrt.xrtAssert(len(instanceNames) > 0,
                    'No instance names specified in call to waitForUserInstanceState')
    allInstancesReachedState = False
    startTime = datetime.now()
    while (datetime.now() - startTime).seconds < timeout:
        instanceData = filter(
def waitForSystemVmAgentState(self, podid, state, timeout=300, pollPeriod=20, exitState=False):
    """Wait for all System VMs (associated with the Pod) to reach the specified state"""
    allSystemVmsReachedState = False
    startTime = datetime.now()
    while (datetime.now() - startTime).seconds < timeout:
        systemVmData = self.cloud.marvin.cloudApi.listSystemVms(podid=podid)
        systemVmNameList = map(lambda x: x.name, systemVmData)
        hostData = filter(lambda x: x.name in systemVmNameList,
                          self.cloud.marvin.cloudApi.listHosts())
        self.logCloudHostInfo()
        if len(systemVmData) != len(hostData):
            xenrt.TEC().warning('Inconsistent System VM and Host data reported by MS')
        xenrt.TEC().logverbose('System VM State: %s'
                               % (pformat(map(lambda x: (x.name, x.state), systemVmData))))
        if not exitState:
            systemVmsNotInState = filter(lambda x: x.state != state, hostData)
            if len(systemVmsNotInState) == 0:
                if state == 'Up':
                    # Check that the system VMs are also all Running
                    systemVmsUpButNotInRunningState = filter(
                        lambda x: x.state != 'Running', systemVmData)
                    if len(systemVmsUpButNotInRunningState) > 0:
                        xenrt.TEC().warning('System VM(s) %s reported as Up but not Running'
                                            % (pformat(map(lambda x: (x.name, x.state),
                                                           systemVmsUpButNotInRunningState))))
                        xenrt.sleep(pollPeriod)  # avoid a tight polling loop before re-checking
                        continue
                xenrt.TEC().logverbose('System VMs [%s] reached state: %s in %d sec'
                                       % (systemVmNameList, state,
                                          (datetime.now() - startTime).seconds))
                allSystemVmsReachedState = True
                break
            else:
                xenrt.TEC().logverbose('Waiting for the following system VMs to reach state %s: %s'
                                       % (state, pformat(map(lambda x: (x.name, x.state),
                                                             systemVmsNotInState))))
                xenrt.sleep(pollPeriod)
        else:
            systemVmsStillInState = filter(lambda x: x.state == state, hostData)
            if len(systemVmsStillInState) == 0:
                xenrt.TEC().logverbose('System VMs [%s] exited state: %s in %d sec'
                                       % (systemVmNameList, state,
                                          (datetime.now() - startTime).seconds))
                xenrt.TEC().logverbose(' New States: %s'
                                       % (pformat(map(lambda x: (x.name, x.state), hostData))))
                allSystemVmsReachedState = True
                break
            else:
                xenrt.TEC().logverbose('Waiting for the following system VMs to exit state %s: %s'
                                       % (state, pformat(map(lambda x: (x.name, x.state),
                                                             systemVmsStillInState))))
                xenrt.sleep(pollPeriod)
    self.logCloudHostInfo()
    if not allSystemVmsReachedState:
        raise xenrt.XRTFailure('Not all System VMs %s state %s in %d seconds'
                               % (exitState and 'exited' or 'reached', state, timeout))
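# Illustrative sketch (not part of the test suite): the waiting methods above all
# follow the same poll-until-predicate pattern. A generic helper using only the
# standard library could look like this; the names here are hypothetical.
import time

def pollUntil(predicate, timeout=300, pollPeriod=20):
    """Poll predicate() every pollPeriod seconds; return True if it becomes
    truthy within timeout seconds, False otherwise."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(pollPeriod)
    return False

# Example usage (hypothetical listVms helper): wait for all VMs to report 'Up'.
# pollUntil(lambda: all(vm.state == 'Up' for vm in listVms()), timeout=600)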
def run(self, arglist):
    for f in self.FILES:
        entry = self.host.execdom0("ls -ld %s" % (f))
        # entry[2] is the owner-write bit of the 'ls -ld' mode string
        if not entry[2] == "w":
            raise xenrt.XRTFailure("No write permissions for %s. (%s)" % (f, entry))
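# Illustrative sketch (not part of the test): index 2 of an 'ls -ld' mode string
# is the owner-write bit, e.g. "-rw-r--r--"[2] == 'w'. When the file system is
# accessible locally, the same check can be done without parsing ls output:
import os
import stat

def ownerWritable(path):
    """Return True if the owner-write permission bit is set on path."""
    return bool(os.stat(path).st_mode & stat.S_IWUSR)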
        self.host = self.getDefaultHost()

    def run(self, arglist):
        self.host.execdom0("touch /root/notalicensefile")
        cli = self.host.getCLIInstance()
        args = []
        args.append("license-file=/root/notalicensefile")
        args.append("host-uuid=%s" % (self.host.getMyHostUUID()))
        try:
            cli.execute("host-license-add", string.join(args))
        except xenrt.XRTFailure, e:
            if not re.search("Failed to read license file", e.reason):
                raise e
        else:
            raise xenrt.XRTFailure("No error raised when adding invalid license file.")

    def postRun(self):
        self.host.execdom0("rm -f /root/notalicensefile")

class TC967(xenrt.TestCase):
    """Incorrect guest name / UUID"""

    def prepare(self, arglist):
        self.host = self.getDefaultHost()

    def run(self, arglist):
        cli = self.host.getCLIInstance()
        try:
            cli.execute("vm-start", "uuid=00000000-0000-0000-0000-000000000000")
def __checkPcrString(self, pcrValue):
    if (pcrValue.count("f") == len(pcrValue)):
        raise xenrt.XRTFailure("PCR value contained all F's")
    if (pcrValue.count("0") == len(pcrValue)):
        raise xenrt.XRTFailure("PCR value contained all 0's")
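# Illustrative sketch (not part of the test): an all-'f' or all-'0' PCR string is
# a degenerate value (a never-extended or reset register), which is what the
# private helper above rejects. The same check as a standalone predicate:
def pcrStringLooksValid(pcrValue):
    """Return False for degenerate PCR values (all 'f's or all '0's)."""
    return not (pcrValue.count("f") == len(pcrValue) or
                pcrValue.count("0") == len(pcrValue))

# Example: pcrStringLooksValid("0" * 40) -> False; a mixed hex digest -> True.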
def run(self, arglist=None):
    self.guest.enableIPv6Dhcp()
    if not self.guest.checkIsIPv6AdrressInRange(self.guest.mainip):
        raise xenrt.XRTFailure("Guest does not have a valid DHCPv6 IPv6 address")
    IPv6WinGuestOnVlan.run(self, arglist)
class TCHibernate(xenrt.TestCase):
    """Tests hibernate initiated from within the guest."""

    WORKLOADS = ["w_find",
                 "w_forktest2",
                 #"w_spamcons",
                 "w_memtest"]

    WINDOWS_WORKLOADS = ["Prime95",
                         "Ping",
                         "SQLIOSim",
                         "Burnintest",
                         "NetperfTX",
                         "NetperfRX",
                         "Memtest"]

    def __init__(self):
        xenrt.TestCase.__init__(self, "TCHibernate")
        self.blocker = True
        self.guest = None
        self.workloads = None
        self.usedclone = False

    def run(self, arglist=None):
        loops = 50
        reboot = False
        workloads = None
        gname = None
        clonevm = False

        for arg in arglist:
            l = string.split(arg, "=", 1)
            if l[0] == "guest":
                gname = l[1]
            if l[0] == "loops":
                loops = int(l[1])
            if l[0] == "reboot":
                reboot = True
            elif l[0] == "workloads":
                if len(l) > 1:
                    workloads = l[1].split(",")
                else:
                    workloads = self.WINDOWS_WORKLOADS
            elif l[0] == "config":
                matching = xenrt.TEC().registry.guestLookup(
                    **xenrt.util.parseXMLConfigString(l[1]))
                for n in matching:
                    xenrt.TEC().comment("Found matching guest(s): %s" % (matching))
                if matching:
                    gname = matching[0]
            elif l[0] == "clone":
                clonevm = True

        if not gname:
            raise xenrt.XRTError("No guest name specified.")
        guest = self.getGuest(gname)
        self.guest = guest
        host = guest.host
        self.getLogsFrom(host)

        if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) or clonevm:
            xenrt.TEC().comment("Using clone to run test.")
            self.blocker = False
            if guest.getState() != "UP":
                guest.start()
            guest.preCloneTailor()
            guest.shutdown()
            clone = guest.cloneVM()
            self.guest = clone
            guest = clone
            self.usedclone = True
            self.getLogsFrom(guest)

        if guest.memory >= 4096:
            xenrt.TEC().skip("Skipping hibernate on > 4GB guest.")
            return
        if not guest.windows:
            xenrt.TEC().skip("Skipping hibernate on non-Windows guest.")
            return
        expfail = string.split(host.lookup("EXPFAIL_HIBERNATE", ""), ",")
        if guest.distro and guest.distro in expfail:
            xenrt.TEC().skip("Skipping hibernate for %s which is expected "
                             "to fail." % (guest.distro))
            return

        try:
            # Make sure the guest is up
            if guest.getState() == "DOWN":
                xenrt.TEC().comment("Starting guest before commencing loop.")
                guest.start()
            # Make sure the guest is healthy before we start.
            guest.waitForDaemon(60, desc="Guest check")
            # Start workloads on the guest.
            if workloads:
                if guest.windows:
                    self.workloads = guest.startWorkloads(workloads)
                else:
                    self.workloads = guest.startWorkloads(self.WORKLOADS)
        except Exception, e:
            xenrt.TEC().logverbose("Guest broken before we started (%s)." % str(e))
            raise

        # Enable hibernate for Tampa guests
        if isinstance(guest, xenrt.lib.xenserver.guest.TampaGuest):
            guest.paramSet("platform:acpi_s4", "true")
            guest.reboot()

        # Enable hibernation.
        try:
            guest.winRegAdd("HKCU",
                            "Software\\Policies\\Microsoft\\Windows\\System\\Power",
                            "PromptPasswordOnResume",
                            "DWORD",
                            0)
            try:
                guest.xmlrpcExec("powercfg.exe /GLOBALPOWERFLAG off /OPTION RESUMEPASSWORD")
            except:
                pass
        except:
            pass
        try:
            guest.xmlrpcExec("powercfg.exe /HIBERNATE ON")
        except:
            pass

        # Test hibernate in a loop
        success = 0
        try:
            for i in range(loops):
                xenrt.TEC().logverbose("Starting loop iteration %u..." % (i))
                host.listDomains()
                attempt = 0
                while True:
                    try:
                        # Ignore errors since we may get the connection
                        # severed on the way down
                        guest.xmlrpcStart("shutdown /h")
                    except:
                        pass
                    try:
                        guest.poll("DOWN", timeout=1200)
                        break
                    except Exception, e:
                        try:
                            # See if the hibernate started, i.e. we can't ping
                            # the execdaemon.
                            guest.checkReachable()
                        except:
                            guest.checkHealth(unreachable=True)
                            raise xenrt.XRTFailure("Hibernate didn't complete")
                        guest.check()
                        if attempt == 2:
                            self.blocker = False
                            raise xenrt.XRTFailure("Hibernate didn't happen after 3 attempts")
                        else:
                            xenrt.TEC().warning("Hibernate didn't seem to happen.")
                        attempt = attempt + 1
                        continue
                time.sleep(2)
                host.listDomains()
                guest.start(skipsniff=True)
                success = success + 1
        finally:
            self.tec.comment("%u/%u iterations successful" % (success, loops))

        # Stop guest workloads.
        if workloads:
            guest.stopWorkloads(self.workloads)
        try:
            if reboot:
                guest.reboot()
        except xenrt.XRTFailure, e:
            raise xenrt.XRTError(e.reason)
def run(self, arglist=None):
    host = self.getDefaultHost()
    guest = host.createGenericLinuxGuest()
    vif = guest.getVIFUUID("eth0")
    self.uninstallOnCleanup(guest)

    # Create arbitrary param-keys for ipv4-allowed and ipv6-allowed
    ipv4address = ["127.0.0.2"]
    ipv6address = ["0::0:1:1"]

    # CLI testing
    step("Add ipv4-allowed and ipv6-allowed params for the vif via CLI")
    guest.setVifAllowedIPv4Addresses(vif, ipv4address)
    guest.setVifAllowedIPv6Addresses(vif, ipv6address)

    step("Clear ipv4-allowed and ipv6-allowed for the vif via CLI")
    guest.clearVifAllowedAddresses(vif)

    step("Verify whether the params get cleared")
    ipv4allowed = host.genParamGet("vif", vif, "ipv4-allowed")
    ipv6allowed = host.genParamGet("vif", vif, "ipv6-allowed")
    if ipv4allowed:
        raise xenrt.XRTFailure("ipv4-allowed didn't get cleared via vif-param-clear. ipv4-allowed=%s" % ipv4allowed)
    if ipv6allowed:
        raise xenrt.XRTFailure("ipv6-allowed didn't get cleared via vif-param-clear. ipv6-allowed=%s" % ipv6allowed)
    log("Both ipv4-allowed and ipv6-allowed got cleared via vif-param-clear")

    step("Add ipv4-allowed and ipv6-allowed again to test vif-param-remove")
    guest.setVifAllowedIPv4Addresses(vif, ipv4address)
    guest.setVifAllowedIPv6Addresses(vif, ipv6address)

    step("Remove ipv4-allowed and ipv6-allowed for the vif via CLI")
    cli = host.getCLIInstance()
    cli.execute("vif-param-remove", "uuid=%s param-name=ipv4-allowed param-key=%s" % (vif, ipv4address[0]))
    cli.execute("vif-param-remove", "uuid=%s param-name=ipv6-allowed param-key=%s" % (vif, ipv6address[0]))

    step("Verify whether the params get removed")
    ipv4allowed = host.genParamGet("vif", vif, "ipv4-allowed")
    ipv6allowed = host.genParamGet("vif", vif, "ipv6-allowed")
    if ipv4allowed:
        raise xenrt.XRTFailure("ipv4-allowed didn't get removed via vif-param-remove. ipv4-allowed=%s" % ipv4allowed)
    if ipv6allowed:
        raise xenrt.XRTFailure("ipv6-allowed didn't get removed via vif-param-remove. ipv6-allowed=%s" % ipv6allowed)
    log("Both ipv4-allowed and ipv6-allowed got removed via vif-param-remove")

    # API testing
    step("Add ipv4-allowed and ipv6-allowed again for the vif via CLI")
    guest.setVifAllowedIPv4Addresses(vif, ipv4address)
    guest.setVifAllowedIPv6Addresses(vif, ipv6address)

    step("Create an API session to the host")
    session = host.getAPISession()
    xapi = session.xenapi
    log("CALL: xenapi.VIF.get_all()")
    vif_opaqueref = xapi.VIF.get_all()[0]  # Opaque ref for the VIF
    log("RESULT: %s" % vif_opaqueref)

    step("Attempt to remove the ipv6_allowed with the key for ipv4_allowed, using an API call")
    log("CALL: xenapi.VIF.remove_ipv6_allowed('%s','%s')" % (vif_opaqueref, ipv4address[0]))
    xapi.VIF.remove_ipv6_allowed(vif_opaqueref, ipv4address[0])

    # Close the API session
    host.logoutAPISession(session)

    # Previously the remove_ipv6_allowed function was incorrect.
    # In that case, the above call will result in ipv4_allowed getting cleared.
    # If fixed, ipv4_allowed should remain.
    ipv4allowed = host.genParamGet("vif", vif, "ipv4-allowed")
    if not ipv4allowed:  # if ipv4_allowed got removed
        raise xenrt.XRTFailure("ipv4_allowed parameter got removed while calling "
                               "remove_ipv6_allowed() with an incorrect param-key")
            'Restarting Management Server: Attempt: %d of %d' % (rebootChecks + 1, maxReboots))
        self.primaryManagementServer.execcmd('mysql -u cloud --password=cloud -h %s --execute="UPDATE cloud.configuration SET value=8096 WHERE name=\'integration.api.port\'"' % self.dbServer.getIP())
        self.restart(checkHealth=False, startStop=(rebootChecks > 0))
        rebootChecks += 1

    if not managementServerOk:
        # Store the MS logs
        mgmtSvrHealthCheckFailedLogDir = os.path.join(xenrt.TEC().getLogdir(), 'cloud', 'healthFailure')
        if not os.path.exists(mgmtSvrHealthCheckFailedLogDir):
            os.makedirs(mgmtSvrHealthCheckFailedLogDir)
        self.getLogs(mgmtSvrHealthCheckFailedLogDir)
        raise xenrt.XRTFailure('Management Server not reachable')

def restart(self, checkHealth=True, startStop=False):
    if not startStop:
        for m in self.readyManagementServers:
            m.execcmd('service %s-management restart' % (self.cmdPrefix))
    else:
        self.stop()
        xenrt.sleep(120)
        self.start()
    if checkHealth:
        self.checkManagementServerHealth()

def stop(self):
    for m in self.readyManagementServers:
def run(self, arglist=None):
    for guest in self.host.guests.values():
        if guest.paramGet("PV-drivers-up-to-date") == "true":
            raise xenrt.XRTFailure("field PV-drivers-up-to-date not updated")
def run(self, arglist=[]):
    out = self.guest.execcmd(
        "cd /root/meliotest/fork_op && /bin/echo -e '\\n' | ./NSnD /fs1/dir2 1 100 local"
    ).splitlines()
    if out[-1] != 'FORK_OP_RESULT=0':
        raise xenrt.XRTFailure("fork_op returned non-zero: %s" % out[-1])
def run(self, arglist=None):
    machine = "RESOURCE_HOST_0"
    if arglist and len(arglist) > 0:
        machine = arglist[0]
    host = xenrt.TEC().registry.hostGet(machine)
    if not host:
        raise xenrt.XRTError("Unable to find host %s in registry" % (machine))
    self.getLogsFrom(host)

    # Select allowed ports by product
    self.expected = string.split(xenrt.TEC().lookup("NMAP_ONLY_PORTS", ""))
    if len(self.expected) == 0:
        self.expected.extend(
            string.split(host.lookup("NMAP_ALLOWED_PORTS", "tcp/22 tcp/6936")))
    self.allowedservices.extend(
        string.split(host.lookup("NMAP_ALLOWED_SERVICES", "nlockmgr")))

    # Run nmap to scan open ports
    outfile = "%s/nmap.txt" % (self.tec.getLogdir())
    xmlfile = "%s/nmap.xml" % (self.tec.getLogdir())
    xenrt.nmap(host.getIP(), xmlfile, outfile)
    if not os.path.exists(xmlfile):
        raise xenrt.XRTError("nmap output file not found")

    # Parse nmap output
    ports = []
    portlist = []
    dom = xml.dom.minidom.parse(xmlfile)
    for i in dom.childNodes:
        if i.nodeType == i.ELEMENT_NODE and i.localName == "nmaprun":
            for c in i.childNodes:
                if c.nodeType == c.ELEMENT_NODE and c.localName == "host":
                    for x in c.childNodes:
                        if x.nodeType == x.ELEMENT_NODE and x.localName == "ports":
                            for p in x.childNodes:
                                if p.nodeType == p.ELEMENT_NODE and p.localName == "port":
                                    proto = p.getAttribute("protocol")
                                    port = p.getAttribute("portid")
                                    service = "UNKNOWN"
                                    state = "UNKNOWN"
                                    for z in p.childNodes:
                                        if z.nodeType == z.ELEMENT_NODE and z.localName == "service":
                                            service = z.getAttribute("name")
                                        elif z.nodeType == z.ELEMENT_NODE and z.localName == "state":
                                            state = z.getAttribute("state")
                                    ports.append(("%s/%s" % (proto, port), service, state))
                                    portlist.append("%s/%s" % (proto, port))
    self.tec.logverbose("Parsed ports: %s" % (ports))

    # Check expected open ports are open
    passed = True
    for i in self.expected:
        if re.search(r"^\(.+\)$", i):
            # Non-compulsory port
            pass
        elif not i in portlist:
            self.tec.reason("Port %s is not open" % (i))
            passed = False
        else:
            self.tec.comment("Expected open port %s found to be open" % (i))

    # Check for any unexpected open ports
    for i in ports:
        port, service, state = i
        if state == "open" or state == "UNKNOWN":
            if (not port in self.expected) and \
                    (not "(%s)" % (port) in self.expected):
                if not service in self.allowedservices:
                    self.tec.reason("Unexpected port %s (%s) is open" % (port, service))
                    passed = False
                else:
                    self.tec.comment("Allowed service %s found on port %s" % (service, port))

    if not passed:
        raise xenrt.XRTFailure("Open port check failed")
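# Illustrative sketch (not part of the test): the nested childNodes walk above
# can be expressed more compactly with minidom's getElementsByTagName, which
# searches all descendants. A standalone parser for the same nmap XML layout,
# assuming only the standard library:
import xml.dom.minidom

def parseNmapPorts(xmlfile):
    """Return a list of (proto/port, service, state) tuples from nmap XML output."""
    dom = xml.dom.minidom.parse(xmlfile)
    results = []
    for p in dom.getElementsByTagName("port"):
        key = "%s/%s" % (p.getAttribute("protocol"), p.getAttribute("portid"))
        services = p.getElementsByTagName("service")
        states = p.getElementsByTagName("state")
        service = services[0].getAttribute("name") if services else "UNKNOWN"
        state = states[0].getAttribute("state") if states else "UNKNOWN"
        results.append((key, service, state))
    return results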
class TC6940(_VBDPlugWindows):
    """Hot unplug of a VBD that the Windows VM had attached when it was booted."""
    DISTRO = "w2k3eesp2"

class TC6949(_VBDPlugLinux):
    """Hot unplug of a VBD that the Linux VM had attached when it was booted."""
    pass

class TC27127(xenrt.TestCase):

    def run(self, arglist):
        host = self.getDefaultHost()
        guest = host.getGuest("winguest2")
        try:
            guest.createDisk("1073741824", sruuid=host.getLocalSR(), plug=True, mode="RO")
        except Exception, ex:
            if "All VBDs of type 'disk' must be read/write for HVM guests" in str(ex):
                log("Read-only disk failed to attach to the Windows machine")
            else:
                raise
        else:
            raise xenrt.XRTFailure("Read-only disk attached to Windows successfully")
def SVVPDTMServerInstall(self, arglist=None):
    self.targetGuest.changeCD(self.ISO_NAME)
    xenrt.sleep(30)
    DTMResScript = r"""
Delay(20000);
var expString = "No tasks are running";
var WshShellObj = new ActiveXObject("WScript.Shell");
for (var count = 0; count < 1100; count++) {
    var WshShellExecObj2 = WshShellObj.Exec("tasklist /FI \"imagename eq Kitsetup.exe*\"");
    var kitSetUpStats = WshShellExecObj2.StdOut.ReadAll();
    var n = kitSetUpStats.indexOf(expString);
    if (n != -1) {
        Delay(20000);
        var fs = new ActiveXObject('Scripting.FileSystemObject');
        var WshShellObj = new ActiveXObject("WScript.Shell");
        var WshShellExecObj = WshShellObj.Exec("tasklist /FI \"imagename eq DTMService.exe*\"");
        var kitSetUpStats = WshShellExecObj.StdOut.ReadAll();
        var n1 = kitSetUpStats.indexOf(expString);
        var WshShellExecObj = WshShellObj.Exec("tasklist /FI \"imagename eq sqlservr.exe*\"");
        var kitSetUpStats = WshShellExecObj.StdOut.ReadAll();
        var n2 = kitSetUpStats.indexOf(expString);
        var WshShellExecObj = WshShellObj.Exec("tasklist /FI \"imagename eq WLKSvc.exe*\"");
        var kitSetUpStats = WshShellExecObj.StdOut.ReadAll();
        var n3 = kitSetUpStats.indexOf(expString);
        if (n1 == -1 && n2 == -1 && n3 == -1) {
            var fs = new ActiveXObject('Scripting.FileSystemObject');
            var fname1 = fs.CreateTextFile("c:\\DTMServiceResult.txt", true);
        }
        break;
    }
    Delay(5000);
}
function Delay(milliseconds) {
    var start = new Date().getTime();
    for (var i = 0; i < 1e7; i++) {
        if ((new Date().getTime() - start) > milliseconds) {
            break;
        }
    }
}
"""
    DTMInstallScript = r"""
d:\\Kitsetup.exe /ui-level express /install {A6E93EA5-52E2-4F16-8AB2-A3A97533FE83}
d:\\Kitsetup.exe /ui-level express /install {EF30275E-5D68-40D2-8AF2-2665AAFCB555}
d:\\Kitsetup.exe /ui-level express /install {98EF97A5-C520-498D-8F0F-2C551636E4CC}
d:\\Kitsetup.exe /ui-level express /install {8868103C-3527-47FA-A116-84DFD1AE954E}
d:\\Kitsetup.exe /ui-level express /install {A9D61D70-94AD-43FF-B770-B05D4A633C34}
d:\\Kitsetup.exe /ui-level express /install {14DDB41C-3868-4566-B508-4F20DD649DE4}
echo DONE > c:\\DTMResult.txt
"""
    self.targetGuest.xmlrpcWriteFile("c:\\DTMInstallScript.bat", DTMInstallScript)
    self.targetGuest.xmlrpcStart("c:\\DTMInstallScript.bat")
    self.timeOut = xenrt.util.timenow() + 7000
    while True:
        try:
            DTMResult = self.targetGuest.xmlrpcFileExists("c:\\DTMResult.txt")
        except Exception, e:
            xenrt.TEC().warning("Exception checking for DTMResult text file")
            xenrt.sleep(300)
            break
        if DTMResult:
            xenrt.TEC().logverbose("DTMResult text file found")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMInstallScript.bat")
            self.targetGuest.xmlrpcStart("del /f c:\\DTMResult.txt")
            break
        if xenrt.util.timenow() > self.timeOut:
            raise xenrt.XRTFailure("Timed out waiting for DTM installation to complete")
        xenrt.sleep(60)
def pingGuest(self, ip6):
    # Ping a guest on its IPv6 address from the controller
    retCode = xenrt.command(("ping6 -c 3 -w 10 %s" % ip6), retval="code")
    if retCode != 0:
        raise xenrt.XRTFailure("Failed to ping the guest on %s" % ip6)
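# Illustrative sketch (not part of the test): outside the XenRT harness the same
# reachability check needs only the standard library. 'ping6 -c 3 -w 10' sends
# three echo requests with a 10-second overall deadline and exits non-zero on
# failure, which is the only signal the check relies on.
import subprocess

def ipv6Reachable(ip6):
    """Return True if ip6 answers ICMPv6 echo within the deadline."""
    return subprocess.call(["ping6", "-c", "3", "-w", "10", ip6]) == 0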
def updateWindowsVM(self, arglist=None):
    windUpdateFinish = r"""
var fs = new ActiveXObject('Scripting.FileSystemObject');
var WshShellObj = new ActiveXObject("WScript.Shell");
var fname2 = fs.CreateTextFile("c:\\counter.txt", true);
var n = 0, n1 = 0;
Delay(20000);
do {
    var WshShellExecObj = WshShellObj.Exec('tasklist /FI \"IMAGENAME eq wuauclt.exe\"');
    var updateProcess = WshShellExecObj.Stdout.ReadAll();
    var count = updateProcess.match(/wuauclt/g);
    try {
        var n = count.length;
    } catch (exception) {
        Delay(5000);
        continue;
    }
    fname2.WriteLine(count + "'" + n + "'");
    if (n > 1) {
        var fname1 = fs.CreateTextFile("c:\\entered2ndloop.txt", true);
        do {
            var WshShellExecObj = WshShellObj.Exec('tasklist /FI \"IMAGENAME eq wuauclt.exe\"');
            var updateProcess1 = WshShellExecObj.Stdout.ReadAll();
            var count1 = updateProcess1.match(/wuauclt/g);
            try {
                var n1 = count1.length;
            } catch (exception) {
                Delay(5000);
                continue;
            }
            fname2.WriteLine(count1 + "'" + n1 + "'");
            if (n1 == 1) {
                var fname1 = fs.CreateTextFile("c:\\WindowsUpdated.txt", true);
                break;
            }
            Delay(5000);
        } while (true);
    }
    if (fs.FileExists("c:\\WindowsUpdated.txt") == true) {
        break;
    }
    Delay(5000);
} while (true);
function Delay(milliseconds) {
    var start = new Date().getTime();
    for (var i = 0; i < 1e7; i++) {
        if ((new Date().getTime() - start) > milliseconds) {
            break;
        }
    }
}
"""
    startUpdateCheckBatch = r"""
c:\windUpdateFinish.js
"""
    # Configure Windows Update on the guest
    self.targetGuest.xmlrpcExec(
        "reg add \"HKLM\\Software\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU\" /v NoAutoUpdate /t REG_DWORD /d 0 /f")
    self.targetGuest.xmlrpcExec(
        "reg add \"HKLM\\Software\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU\" /v AUOptions /t REG_DWORD /d 3 /f")
    self.targetGuest.xmlrpcExec(
        "reg add \"HKLM\\Software\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU\" /v NoAutoRebootWithLoggedOnUsers /t REG_DWORD /d 1 /f")
    try:
        self.targetGuest.xmlrpcExec(
            "reg delete \"HKLM\\Software\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU\" /v ScheduledInstallDay /f")
    except:
        pass
    try:
        self.targetGuest.xmlrpcExec(
            "reg delete \"HKLM\\Software\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU\" /v ScheduledInstallTime /f")
    except:
        pass
    xenrt.sleep(60)

    # Restart the Windows automatic update service
    self.targetGuest.xmlrpcExec("net stop wuauserv")
    self.targetGuest.xmlrpcExec("net start wuauserv")

    # Detect Windows updates
    self.targetGuest.xmlrpcExec("wuauclt /detectnow")
    xenrt.sleep(600)
    self.targetGuest.xmlrpcExec("wuauclt /updatenow")

    self.targetGuest.xmlrpcWriteFile("c:\\windUpdateFinish.js", windUpdateFinish)
    self.targetGuest.xmlrpcWriteFile("c:\\startUpdateCheckBatch.bat", startUpdateCheckBatch)
    self.targetGuest.xmlrpcStart("c:\\startUpdateCheckBatch.bat")
    self.timeOut = xenrt.util.timenow() + 20000
    while True:
        try:
            updateComplete = self.targetGuest.xmlrpcFileExists("c:\\WindowsUpdated.txt")
        except Exception, e:
            xenrt.TEC().warning("Exception checking for WindowsUpdated text file")
            xenrt.sleep(300)
            break
        if updateComplete:
            xenrt.TEC().logverbose("WindowsUpdated text file found")
            self.targetGuest.xmlrpcStart("del /f c:\\WindowsUpdated.txt")
            break
        if xenrt.util.timenow() > self.timeOut:
            raise xenrt.XRTFailure("Timed out waiting for Windows update to complete")
        xenrt.sleep(600)
def guest2guestPing(self, srcGuest, destIP):
    # Ping a guest from another guest
    data = srcGuest.xmlrpcExec("ping -6 -n 3 %s" % destIP, returndata=True)
    if (not re.search(r"\(0% loss\)", data)
            or re.search("Destination host unreachable", data)):
        raise xenrt.XRTFailure("Pinging target failed.")
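# Illustrative note (an assumption worth encoding): Windows ping counts
# "Destination host unreachable" responses as received replies, so "(0% loss)"
# alone does not prove reachability - hence the second check above. The same
# two-condition test as a standalone predicate:
import re

def windowsPingSucceeded(output):
    """Return True only if ping reported 0% loss and no unreachable replies."""
    return (re.search(r"\(0% loss\)", output) is not None
            and "Destination host unreachable" not in output)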
def run(self, arglist=[]):
    extraDisks = (self.NO_OF_VDIS / self.NO_OF_VMS) - 1
    xenrt.TEC().logverbose("Performance testing rawHBA SR on a pool of %d hosts "
                           "with %d guests, attached with %d extra disks" %
                           (self.NO_OF_HOSTS, self.NO_OF_VMS, extraDisks))

    if (len(self.hosts) != self.NO_OF_HOSTS):
        raise xenrt.XRTFailure("The requisite of the test demands %d hosts in a pool." %
                               (self.NO_OF_HOSTS))

    xenrt.TEC().logverbose("Creating a pool of %d hosts." % (self.NO_OF_HOSTS))

    # 1. Create the pool of servers.
    self.pool = xenrt.lib.xenserver.poolFactory(self.hosts[0].productVersion)(self.hosts[0])
    self.pool.master = self.hosts[0]

    # Add all remaining hosts to the pool.
    for host in self.hosts[1:]:
        # The host joining the pool cannot contain any shared storage.
        for sr in host.minimalList("sr-list", args="content-type=iso type=iso"):
            host.forgetSR(sr)
        self.pool.addHost(host)
    self.pool.setPoolParam("name-label", "rawHBAPool")
    self.pool.check()

    xenrt.TEC().logverbose("Creating a rawHBA SR on the pool of %d hosts." % (self.NO_OF_HOSTS))

    # 2. Create the rawHBA SR.
    timeNow = xenrt.util.timenow()
    self.createSR()
    xenrt.TEC().logverbose("Time taken to create rawHBA SR on pool master with %d LUNs mapped: %s seconds." %
                           (self.NO_OF_VDIS, (xenrt.util.timenow() - timeNow)))

    # Verify the rawHBA SR.
    self.checkSR()

    # Set the pool default SR to be the rawHBA SR.
    self.pool.setPoolParam("default-SR", self.sruuid)

    xenrt.TEC().logverbose("There are %s LUN/VDIs in the test." % len(self.vdiuuids))
    if (len(self.vdiuuids) != self.NO_OF_VDIS):
        raise xenrt.XRTFailure("The requisite of the test demands %d LUN/VDIs in the system." %
                               (self.NO_OF_VDIS))

    xenrt.TEC().logverbose("Scanning the rawHBA SR on the pool of %d hosts." % (self.NO_OF_HOSTS))

    # 3. Get the time taken to scan the rawHBA SR.
    timeNow = xenrt.util.timenow()
    self.sr.scan()
    xenrt.TEC().logverbose("Time taken to scan the rawHBA SR on master with %d LUNs mapped: %s seconds." %
                           (self.NO_OF_VDIS, (xenrt.util.timenow() - timeNow)))

    # 4. Now find out the time taken to reboot all hosts in the pool.
    rebootTag = "After creating rawHBA SR Reboot"
    self.poolReboot(rebootTag)

    # 5. Create and install a number of guests in parallel using the XenRT pfarm.
    self.vmInstall()

    # 6. Now attach extra disks to each VM from the remaining LUNs.
    self.attachExtraDisks()

    # 7. Install the IOZone test tool on each guest and run it in parallel using the XenRT pfarm.
    self.startIOZoneParallely()

    # 8. Time taken to shut down all the guests serially with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmShutdown(vmTag)

    # 9. Time taken to start all the guests serially with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmStart(vmTag)

    # 10. Reboot all hosts in the pool with all guests installed and each guest attached with extra disks.
    rebootTag = "After installing the guests Reboot"
    self.poolReboot(rebootTag)

    # 11. Start the guests again.
    vmTag = "after pool reboot"
    self.vmStart(vmTag)

    # 12. Time taken to shut down the guests with extra disks attached.
    vmTag = "before un-installation"
    self.vmShutdown(vmTag)

    # 13. Time taken to uninstall the guests with extra disks attached.
    vmTag = ("with %d extra disks attached" % (extraDisks))
    self.vmUninstall(vmTag)

    xenrt.TEC().logverbose("Destroying the rawHBA SR on the pool of %d hosts." % (self.NO_OF_HOSTS))

    # 14. Destroy the rawHBA SR on the pool.
    timeNow = xenrt.util.timenow()
    self.deleteSR()
    xenrt.TEC().logverbose("Time taken to destroy the rawHBA SR on the %d-host pool: %s seconds."
                           % (self.NO_OF_HOSTS, (xenrt.util.timenow() - timeNow)))
def run(self, arglist=None):
    commandOutput = self.guest.xmlrpcExec("ipconfig /all", returndata=True)
    if not len(commandOutput) > 0:
        raise xenrt.XRTFailure("Failed to contact the guest through xmlrpc")
def _assertNonZeroStatus(self, response):
    if 'status: non-zero exit' not in response:
        raise xenrt.XRTFailure('Non Zero status not reported')
def __verifyOutput(self, pluginOutput):
    if pluginOutput is None or len(pluginOutput) < 1:
        raise xenrt.XRTFailure("Output from the command was empty")
    if ("Exception" in pluginOutput):
        raise xenrt.XRTFailure("Exception text found in return value")