def reconfigureToStatic(self, ad=False):
    """Switch the Windows machine's NIC from DHCP to a static IP via netsh.

    The static address/mask/gateway are taken from the machine's current
    Windows `ipconfig /all` output, so the machine keeps the same address.

    :param ad: if True, use the Active Directory DNS server instead of the
               XenRT controller's address.
    """
    data = self.getWindowsIPConfigData()
    # Find the interface whose IPv4 address matches this machine; newer
    # Windows reports the address with a "(Preferred)" suffix.
    ifname = [x for x in data.keys() if data[x].has_key('IPv4 Address') and (data[x]['IPv4 Address'] == self.machine.ipaddr or data[x]['IPv4 Address'] == "%s(Preferred)" % self.machine.ipaddr)][0]
    # NOTE(review): netcfg is looked up but never used below — confirm it
    # is not needed before removing.
    netcfg = xenrt.TEC().lookup(["NETWORK_CONFIG", "DEFAULT"])
    # The gateway field can contain extra tokens (e.g. IPv6); keep only the
    # first dotted-quad IPv4 value.
    cmd = "netsh interface ip set address \"%s\" static %s %s %s 1" % (ifname, self.machine.ipaddr, data[ifname]['Subnet Mask'], [x for x in data[ifname]['Default Gateway'].split() if re.match("\d+\.\d+\.\d+\.\d+", x)][0])
    ref = self.xmlrpcStart(cmd)
    deadline = xenrt.timenow() + 120
    # Reconfiguring the NIC drops the XML-RPC connection briefly, so poll
    # best-effort (swallowing transient errors) until the command completes.
    while True:
        try:
            if self.xmlrpcPoll(ref):
                break
        except:
            pass
        if xenrt.timenow() > deadline:
            raise xenrt.XRTError("Timed out setting IP to static")
        xenrt.sleep(5)
    # Static config needs an explicit DNS server as well.
    if ad:
        dns = xenrt.getADConfig().dns
    else:
        dns = xenrt.TEC().config.lookup("XENRT_SERVER_ADDRESS")
    cmd = "netsh interface ipv4 add dnsservers \"%s\" %s" % (ifname, dns)
    self.xmlrpcExec(cmd)
def doMessage(self, etype):
    """Create a xapi message against the first entity of type ``etype`` and
    verify it is listed with the correct class, object UUID, body and a
    timestamp inside the creation window.

    :param etype: xapi object type, e.g. "vm" or "host" (used as
                  "<etype>-list" on the CLI).
    :raises xenrt.XRTError: if no entity of the requested type exists.
    :raises xenrt.XRTFailure: if the created message is missing, duplicated,
                              or any field does not match.
    """
    host = self.getDefaultHost()
    uuids = host.minimalList("%s-list" % (etype))
    if len(uuids) == 0:
        raise xenrt.XRTError("No entities of type '%s' found" % (etype))
    uuid = uuids[0]
    name = "TC8172_%s_%u" % (etype, xenrt.timenow())
    body = "Test message in TC-8172 for %s [%s]" % (etype, uuid)
    # Bracket the creation with a +/-1s window for the timestamp check below.
    time1 = xenrt.timenow() - 1
    host.messageGeneralCreate(etype, uuid, name, body)
    time2 = xenrt.timenow() + 1
    # Check the message
    messages = host.minimalList("message-list", "uuid", "name=%s" % (name))
    if len(messages) == 0:
        raise xenrt.XRTFailure("Could not find message in list")
    if len(messages) > 1:
        raise xenrt.XRTFailure("Found multiple messages in list")
    m = messages[0]
    h = host.genParamGet("message", m, "class")
    if h.lower() != etype:
        raise xenrt.XRTFailure("Message has incorrect class")
    u = host.genParamGet("message", m, "obj-uuid")
    if u != uuid:
        raise xenrt.XRTFailure("Message has incorrect UUID")
    b = host.genParamGet("message", m, "body")
    if b != body:
        raise xenrt.XRTFailure("Message has incorrect body")
    ts = host.genParamGet("message", m, "timestamp")
    tsint = xenrt.parseXapiTime(ts)
    if tsint < time1 or tsint > time2:
        raise xenrt.XRTFailure("Message timestamp does not match the time "
                               "it was created",
                               "TS %u created in [%u, %u]" % (tsint, time1, time2))
def checkCluster(self):
    """Wait (up to 10 minutes) until every host in the melio cluster can see
    every other host in its network session list.

    :raises xenrt.XRTError: if full mutual visibility is not reached in time.
    """
    # Check every host can see every other host in the cluster
    if len(self.hosts) == 1:
        return
    deadline = xenrt.timenow() + 600
    while True:
        ready = True
        for checkHost in self.hosts:
            with self.getMelioClient(checkHost) as melioClient:
                # See which other servers we're connected to
                servers = melioClient.get_all()['network_session']
                # We don't always get a dictionary back if it's empty
                if not isinstance(servers, dict):
                    ready = False
                else:
                    # Check we're connected to every other host (except ourselves)
                    for expectedHost in self.hosts:
                        if expectedHost == checkHost:
                            continue
                        if not expectedHost.getName() in [x['computer_name'] for x in servers.values()]:
                            ready = False
                            # No point in continuing
                            break
            if not ready:
                # No point in continuing
                break
        if ready:
            # All done
            break
        if xenrt.timenow() > deadline:
            raise xenrt.XRTError("Timed out waiting for all of the cluster to appear")
        # Sleep for 20 seconds before trying again
        xenrt.sleep(20)
def doMessage(self, etype):
    """Create a xapi message against the first entity of type ``etype`` and
    verify its class, object UUID, body and timestamp.

    :param etype: xapi object type, e.g. "vm" or "host" (used as
                  "<etype>-list" on the CLI).
    :raises xenrt.XRTError: if no entity of the requested type exists.
    :raises xenrt.XRTFailure: if the message is missing, duplicated, or any
                              field is wrong.
    """
    host = self.getDefaultHost()
    uuids = host.minimalList("%s-list" % (etype))
    if len(uuids) == 0:
        raise xenrt.XRTError("No entities of type '%s' found" % (etype))
    uuid = uuids[0]
    name = "TC8172_%s_%u" % (etype, xenrt.timenow())
    body = "Test message in TC-8172 for %s [%s]" % (etype, uuid)
    # Bracket the creation with a +/-1s window for the timestamp check below.
    time1 = xenrt.timenow() - 1
    host.messageGeneralCreate(etype, uuid, name, body)
    time2 = xenrt.timenow() + 1
    # Check the message
    messages = host.minimalList("message-list", "uuid", "name=%s" % (name))
    if len(messages) == 0:
        raise xenrt.XRTFailure("Could not find message in list")
    if len(messages) > 1:
        raise xenrt.XRTFailure("Found multiple messages in list")
    m = messages[0]
    h = host.genParamGet("message", m, "class")
    if h.lower() != etype:
        raise xenrt.XRTFailure("Message has incorrect class")
    u = host.genParamGet("message", m, "obj-uuid")
    if u != uuid:
        raise xenrt.XRTFailure("Message has incorrect UUID")
    b = host.genParamGet("message", m, "body")
    if b != body:
        raise xenrt.XRTFailure("Message has incorrect body")
    ts = host.genParamGet("message", m, "timestamp")
    tsint = xenrt.parseXapiTime(ts)
    if tsint < time1 or tsint > time2:
        raise xenrt.XRTFailure(
            "Message timestamp does not match the time "
            "it was created",
            "TS %u created in [%u, %u]" % (tsint, time1, time2))
def waitForSystemVmsReady(self):
    """Wait (up to 20 minutes) for all CloudStack system VMs to be running
    and their corresponding host objects to report "Up".

    :raises xenrt.XRTError: on timeout.
    """
    deadline = xenrt.timenow() + 1200
    while True:
        systemvms = self.cloudApi.listSystemVms() or []
        startingvms = [x for x in systemvms if x.state == "Starting"]
        # Host objects whose names match a system VM — CloudStack creates a
        # host record for each system VM once it has booted.
        systemvmhosts = [x for x in self.cloudApi.listHosts() or [] if x.name in [y.name for y in systemvms]]
        if systemvmhosts:
            # At least one host object has been created
            downhosts = [x for x in systemvmhosts if x.state != "Up"]
            if not downhosts and not startingvms:
                # All up, complete
                xenrt.TEC().logverbose("All System VMs ready")
                return
            else:
                if downhosts:
                    xenrt.TEC().logverbose("%s not up" % ", ".join([x.name for x in downhosts]))
                if startingvms:
                    xenrt.TEC().logverbose("%s starting" % ", ".join([x.name for x in startingvms]))
        else:
            xenrt.TEC().logverbose("No system VMs present yet")
        if xenrt.timenow() > deadline:
            raise xenrt.XRTError("Waiting for system VMs timed out")
        xenrt.sleep(15)
def reconfigureToStatic(self, ad=False):
    """Switch the Windows machine's NIC from DHCP to a static IP via netsh,
    keeping the current address, mask and gateway.

    :param ad: if True, use the Active Directory DNS server instead of the
               XenRT controller's address.
    """
    data = self.getWindowsIPConfigData()
    # Find the interface whose IPv4 address matches this machine; newer
    # Windows reports the address with a "(Preferred)" suffix.
    ifname = [
        x for x in data.keys()
        if data[x].has_key('IPv4 Address') and (
            data[x]['IPv4 Address'] == self.machine.ipaddr or data[x]
            ['IPv4 Address'] == "%s(Preferred)" % self.machine.ipaddr)
    ][0]
    # NOTE(review): netcfg is looked up but never used below — confirm it
    # is not needed before removing.
    netcfg = xenrt.TEC().lookup(["NETWORK_CONFIG", "DEFAULT"])
    # The gateway field can contain extra tokens; keep only the first
    # dotted-quad IPv4 value.
    cmd = "netsh interface ip set address \"%s\" static %s %s %s 1" % (
        ifname, self.machine.ipaddr, data[ifname]['Subnet Mask'], [
            x for x in data[ifname]['Default Gateway'].split()
            if re.match("\d+\.\d+\.\d+\.\d+", x)
        ][0])
    ref = self.xmlrpcStart(cmd)
    deadline = xenrt.timenow() + 120
    # Reconfiguring the NIC drops the XML-RPC connection briefly, so poll
    # best-effort (swallowing transient errors) until the command completes.
    while True:
        try:
            if self.xmlrpcPoll(ref):
                break
        except:
            pass
        if xenrt.timenow() > deadline:
            raise xenrt.XRTError("Timed out setting IP to static")
        xenrt.sleep(5)
    # Static config needs an explicit DNS server as well.
    if ad:
        dns = xenrt.getADConfig().dns
    else:
        dns = xenrt.TEC().config.lookup("XENRT_SERVER_ADDRESS")
    cmd = "netsh interface ipv4 add dnsservers \"%s\" %s" % (ifname, dns)
    self.xmlrpcExec(cmd)
def enableAlertonHost(self, host, perfmon=PERFMON, alarmLevel="4E30", alarmTriggerPeriod="60", alarmAutoInhibitPeriod="300"): """This defaults to setting an alert with repeat time of 1 min and perfmon poll time of 5 min xe host-param-set other-config: perfmon='<config><variable><name value="network_usage" /><alarm_trigger_level value="2" /> <alarm_trigger_period value="30" /><alarm_auto_inhibit_period value="300" /></variable></config>' """ # Start on a clean slate self.deleteAllAlarms(host) # Check if alarm of requested type is already enabled try: enableFlag=host.getHostParam("other-config","perfmon") except: enableFlag = "" if re.search(perfmon, enableFlag, re.IGNORECASE): self.origXrtTime = xenrt.timenow() return cmdXML ="<config><variable><name value=\'%s\' />" \ "<alarm_trigger_level value=\'%s\' />" \ "<alarm_trigger_period value=\'%s\' />" \ "<alarm_auto_inhibit_period value=\'%s\' />" \ "</variable></config>" % \ (perfmon, alarmLevel, alarmTriggerPeriod, alarmAutoInhibitPeriod) param="other-config:perfmon" host.setHostParam(param, cmdXML) self.origXrtTime = xenrt.timenow() xenrt.log("Alert enabled at time - %s" % self.origXrtTime) xenrt.log("Verify the other-config parameters for perfmon") self.verifyOtherConfigCmd(host)
def setupMelioDisk(self):
    """Bring the shared SCSI device under melio management and create a
    volume on it, then reboot the remaining hosts so they pick it up.

    Stores the new volume GUID in self.guid.
    :raises xenrt.XRTError: on timeout waiting for the disk to appear or
                            reach the managed state.
    """
    # Setup a melio disk on the scsi device
    # Strip the leading "/dev/" (5 chars) to get the bare system name.
    disk = self.hosts[0].execdom0("realpath %s" % self.device).strip()[5:]
    with self.getMelioClient(self.hosts[0]) as melioClient:
        deadline = xenrt.timenow() + 600
        # Wait for the device to show up as an unmanaged disk.
        while True:
            data = melioClient.get_all()
            unmanaged = data.get('unmanaged_disk')
            xenrt.TEC().logverbose("Unmanaged disks: %s" % json.dumps(unmanaged, indent=2))
            if unmanaged:
                disksToManage = [x for x in unmanaged if x['system_name'] == disk]
            else:
                disksToManage = []
            if disksToManage:
                diskToManage = disksToManage[0]
                break
            if xenrt.timenow() > deadline:
                raise xenrt.XRTError("Timed out waiting for disk to appear")
            xenrt.sleep(10)
        melioClient.manage_disk(diskToManage['system_name'])
        deadline = xenrt.timenow() + 600
        # Wait for the managed disk to reach state 2 (ready).
        # NOTE(review): the meaning of state value 2 comes from the melio
        # API — confirm against its documentation.
        while True:
            managedDisks = melioClient.get_all()['managed_disk']
            guid = [x for x in managedDisks.keys() if managedDisks[x]['system_name'] == disk][0]
            if int(managedDisks[guid]['state']) == 2:
                break
            if xenrt.timenow() > deadline:
                raise xenrt.XRTError("Timed out waiting for disk to get to state 2")
            xenrt.sleep(10)
        # Volume name is the disk GUID without its leading underscore(s).
        self.guid = melioClient.create_volume(guid.lstrip("_"), managedDisks[guid]['free_space'])
    self.getSanDeviceForHost(self.hosts[0])
    # Reboot the other hosts in parallel so they see the new volume.
    tasks = [xenrt.PTask(self.rebootAndWait, x) for x in self.hosts[1:]]
    xenrt.pfarm(tasks)
def checkXapiResponsive(self, host):
    """Verify xapi answers CLI calls promptly on the given host.

    Runs 20 consecutive vm-list commands and fails if any single call
    takes longer than 10 seconds.
    """
    attempt = 0
    while attempt < 20:
        began = xenrt.timenow()
        host.getCLIInstance().execute("vm-list")
        elapsed = xenrt.timenow() - began
        if elapsed > 10:
            raise xenrt.XRTError("vm-list took > 10 seconds after installing melio")
        attempt += 1
def runViaDaemon(self, remote, arglist):
    """Run the Windows NDIS test suite on ``remote`` via the XML-RPC daemon.

    Unpacks the ndistest tarball, launches runmpe.vbs for the correct
    architecture, then polls (for up to 4 hours) until ndistest.exe is no
    longer running. Logs are always fetched, even on failure.

    :param remote: Windows guest/host object exposing the xmlrpc* daemon API.
    :param arglist: unused here (kept for the standard runViaDaemon signature).
    """
    self.remote = remote
    # Pre-Vista (NT < 6.0) needs .NET 2 installing first.
    if self.remote.xmlrpcWindowsVersion() < "6.0":
        xenrt.TEC().logverbose("Installing .Net 2.")
        self.remote.installDotNet2()
    self.workdir = self.remote.xmlrpcTempDir()
    self.remote.xmlrpcUnpackTarball(\
        "%s/ndistest.tgz" % (xenrt.TEC().lookup("TEST_TARBALL_BASE")),
        self.workdir)
    if self.remote.xmlrpcGetArch() == "amd64":
        xenrt.TEC().comment("Running 64-bit test.")
        execdir = "%s\\ndistest\\%sndistest64\\ndistest.net" % \
                  (self.workdir, self.SUBDIR)
    else:
        execdir = "%s\\ndistest\\%sndistest32\\ndistest.net" % \
                  (self.workdir, self.SUBDIR)
    ref = self.remote.xmlrpcStart("cd %s\ncscript runmpe.vbs" % (execdir))
    # Give the suite time to get going before we start polling.
    time.sleep(300)
    # Keep checking we're OK.
    try:
        finishtime = xenrt.timenow() + 14400 # 4 hours.
        while True:
            if xenrt.timenow() > finishtime:
                raise xenrt.XRTFailure("NDIS test hasn't finished in a "
                                       "long time.")
            time.sleep(30)
            # The test may hibernate/reboot the guest, so wait for the
            # daemon to be reachable before querying it.
            remote.waitForDaemon(1800, level=xenrt.RC_OK)
            try:
                pslist = remote._xmlrpc().ps()
            except:
                xenrt.TEC().logverbose("PS failed. Probably hibernating "
                                       "again.")
            else:
                # Completion is detected by ndistest.exe vanishing from the
                # process list.
                if not "ndistest.exe" in pslist:
                    xenrt.TEC().logverbose("Appear to have successfully "
                                           "completed.")
                    break
                else:
                    xenrt.TEC().logverbose("NDIS test still seems to be "
                                           "running.")
        # Check return code and fetch any results
        rc = self.remote.xmlrpcReturnCode(ref)
        log = self.remote.xmlrpcLog(ref)
        xenrt.TEC().logverbose(log)
    finally:
        try:
            # Grab the log file tree
            logsubdir = "%s/ndistestlogs" % (xenrt.TEC().getLogdir())
            os.makedirs(logsubdir)
            self.remote.xmlrpcFetchRecursive("%s\\logs" % (execdir),
                                             logsubdir)
        except Exception, e:
            xenrt.TEC().warning("Exception fetching ndis logs: %s" % (str(e)))
def pollAsyncJob(self, jobid, timeout=1800):
    """Poll checkAsyncJob every 15s until it yields a result.

    :param jobid: identifier of the asynchronous job to poll.
    :param timeout: seconds to wait before giving up (default 30 minutes).
    :returns: the truthy result from checkAsyncJob.
    :raises xenrt.XRTError: if no result arrives before the timeout.
    """
    giveUpAt = xenrt.timenow() + timeout
    while True:
        if xenrt.timenow() > giveUpAt:
            raise xenrt.XRTError("Timed out waiting for response")
        outcome = self.checkAsyncJob(jobid)
        if outcome:
            return outcome
        xenrt.sleep(15)
def run(self, arglist):
    """Reboot the guest (verifying the PCI device can be reset), then run
    fio checks repeatedly for self.duration seconds."""
    self.guest.reboot()
    stopAfter = xenrt.timenow() + self.duration
    keepGoing = True
    while keepGoing:
        # Always run at least one check before consulting the clock.
        self.fio.runCheck()
        keepGoing = xenrt.timenow() <= stopAfter
def run(self, arglist):
    """Watch xapi's resident memory for self.DURATION seconds and fail if
    it grows by more than 50% over the starting value.

    :raises xenrt.XRTFailure: if a leak is suspected.
    """
    initial = self.getXAPIpmem()
    xenrt.TEC().logverbose("Initial XAPI pmem: %s" % (initial))
    end = xenrt.timenow() + self.DURATION
    while xenrt.timenow() < end:
        current = self.getXAPIpmem()
        xenrt.TEC().logverbose("Current XAPI pmem: %s" % (current))
        # Compare without division: the old 'current / initial > 1.5'
        # floor-divides when pmem values are ints, so the 1.5x threshold
        # only actually fired at >= 2x growth.
        if current > initial * 1.5:
            raise xenrt.XRTFailure("XAPI may be leaking memory.")
        time.sleep(30)
def run(self, arglist):
    """Watch xapi's resident memory for self.DURATION seconds and fail if
    it grows by more than 50% over the starting value.

    :raises xenrt.XRTFailure: if a leak is suspected.
    """
    initial = self.getXAPIpmem()
    xenrt.TEC().logverbose("Initial XAPI pmem: %s" % (initial))
    end = xenrt.timenow() + self.DURATION
    while xenrt.timenow() < end:
        current = self.getXAPIpmem()
        xenrt.TEC().logverbose("Current XAPI pmem: %s" % (current))
        # Compare without division: the old 'current/initial > 1.5'
        # floor-divides when pmem values are ints, so the 1.5x threshold
        # only actually fired at >= 2x growth.
        if current > initial * 1.5:
            raise xenrt.XRTFailure("XAPI may be leaking memory.")
        time.sleep(30)
def run(self, arglist=None):
    """Verify the guest agent keeps xapi's data/ts RDP flag in sync across
    disable, checkpoint, enable, and revert-to-snapshot.

    Flow: disable RDP via registry -> checkpoint -> enable RDP -> revert ->
    wait up to 60 minutes for the flag to return to disabled -> re-enable.

    :raises xenrt.XRTFailure: if xapi or the guest agent fails to reflect
                              any RDP state change.
    """
    xapiRdpObj = XapiRdp(self.guest)
    # Disable the RDP on the guest.
    step(" Test is trying to set fDenyTSConnections on the guest to disable RDP")
    self.guest.winRegAdd('HKLM', 'System\\CurrentControlSet\\Control\\Terminal Server\\', 'fDenyTSConnections',"DWORD", 1)
    xenrt.sleep(10)
    # Make sure RDP disabled field updated.
    if xapiRdpObj.isRdpEnabled():
        raise xenrt.XRTFailure("Guest agent does not updated data/ts about the RDP status change for the guest %s " % (self.guest))
    xenrt.TEC().logverbose("Guest agent updated the RDP status in data/ts successfully for the guest %s" % (self.guest))
    # Take snapshot of the guest
    step("Test trying to take the snapshot( memory+disk ) of the guest")
    checkpoint = self.guest.checkpoint()
    # Enable the RDP on the guest
    if not xapiRdpObj.enableRdp():
        raise xenrt.XRTFailure("XAPI failed to enable the RDP on the guest %s with tools installed" % (self.guest))
    xenrt.TEC().logverbose("XAPI successfully enabled the RDP for the guest: %s " % (self.guest))
    xenrt.sleep(10)
    # Make sure RDP enabled field updated
    if not xapiRdpObj.isRdpEnabled():
        raise xenrt.XRTFailure("Guest agent does not updated data/ts about the RDP status change for the guest %s " % (self.guest))
    xenrt.TEC().logverbose("Guest agent updated the RDP status in data/ts successfully for the guest %s" % (self.guest))
    # Revert to snapshot
    step("Test reverting the guest snapshot")
    self.guest.revert(checkpoint)
    self.guest.resume()
    # When we revert to snapshot RDP should be in disabled state
    # We wait 60mins hoping data/ts will be updated by the guest agent
    started = xenrt.timenow()
    finishat = started + 3600
    while finishat > xenrt.timenow() and xapiRdpObj.isRdpEnabled():
        xenrt.sleep(10)
    if xapiRdpObj.isRdpEnabled():
        raise xenrt.XRTFailure("Guest agent for %s not updated the data/ts until 60 mins after reverting to snapshot" % (self.guest))
    xenrt.TEC().logverbose("Guest agent for %s took %d seconds to update data/ts after reverting to snapshot" %
                           (self.guest,xenrt.timenow()-started))
    # Enable the RDP
    if not xapiRdpObj.enableRdp():
        raise xenrt.XRTFailure("XAPI failed to enable the RDP on the guest %s with tools installed" % (self.guest))
    xenrt.TEC().logverbose("XAPI successfully enabled the RDP for the guest: %s " % (self.guest))
    xenrt.sleep(10)
    # Make sure RDP enabled field updated
    if not xapiRdpObj.isRdpEnabled():
        raise xenrt.XRTFailure("Guest agent does not updated data/ts about the RDP status change for the guest %s " % (self.guest))
    xenrt.TEC().logverbose("Guest agent updated the RDP status in data/ts successfully for the guest %s" % (self.guest))
    self.guest.checkHealth()
def run(self, arglist=None):
    """Configure a VM protection policy (VMPP) with an NFS archive target,
    protect two guests with it, then watch /var/log/VMPRlog for up to two
    hours to verify that VM exports via VMPR run concurrently.

    Finally waits for the backup/archive runs to complete, disables the
    policy and cleans up.
    """
    #Mount the nfs SR
    nfs = self.getnfs()
    self.init()
    #Create this vmpp1 with params
    vmpp = self.pool.createVMPP(self.vmpp1['name'], self.vmpp1['btype'], self.vmpp1['bfreq'])
    self.pool.setVMPPParam(vmpp, 'backup-retention-value', str(self.vmpp1['brtnt']))
    self.pool.setVMPPParam(vmpp, 'archive-target-type', self.vmpp1['atype'])
    self.pool.setVMPPParam(vmpp, 'archive-target-config:location', nfs['rpath'])
    self.pool.setVMPPParam(vmpp, 'archive-frequency', self.vmpp1['afreq'])
    params = self.vmpp1.get('params', {})
    for key,val in params.iteritems():
        self.pool.setVMPPParam(vmpp, key, val)
    #Disable the policy
    self.pool.setVMPPParam(vmpp, 'is-policy-enabled', "false")
    #Assign VM1 and VM2 to this policy
    self.guest1.paramSet("protection-policy", vmpp)
    self.guest2.paramSet("protection-policy", vmpp)
    #Enable the policy
    self.pool.setVMPPParam(vmpp, 'is-policy-enabled', 'true')
    # Remember the last-run times so we can later wait for them to change.
    vmppconf = self.pool.getVMPPConf(vmpp=vmpp)
    timenow = xenrt.timenow()
    timeout = timenow + 7200
    #check the VMPR archive is concurrent
    while xenrt.timenow() < timeout:
        #store the output of VMPRlogs in a string
        para = self.master.execdom0("tail -10 /var/log/VMPRlog")
        xenrt.TEC().logverbose("The VMPRlog contents before strip are %s" % para)
        #parse the log to filter for the pattern matching
        # Strip digits/punctuation and the "<Month> localhost VMPR " prefix
        # so consecutive log entries reduce to their bare messages.
        subpara = re.sub( r'\d|-|:|\[|\]|\.|\$', "", str(para))
        subpara = re.sub( r'%s localhost VMPR ' % (time.strftime("%b", (time.localtime(time.time())))), "", str(subpara))
        xenrt.TEC().logverbose("The VMPRlog contents after strip are %s" % subpara)
        #match the pattern "In single_archive \n In single_archive"
        # Two adjacent single_archive entries indicate two exports running
        # concurrently.
        flag = re.search("In single_archive\nIn single_archive", subpara, flags=0)
        if flag:
            xenrt.TEC().logverbose("VMs export via VMPR are concurrent")
            break
    if not flag:
        xenrt.TEC().logverbose("VMs export via VMPR are not concurrent verified via logs")
    #end up the running setup
    self.waitVMPPEvent(self.pool, vmpp, "%s=/=%s" % ('backup-last-run-time', vmppconf['backup-last-run-time']))
    self.waitVMPPEvent(self.pool, vmpp, "is-backup-running=false")
    self.waitVMPPEvent(self.pool, vmpp, "%s=/=%s" % ('archive-last-run-time', vmppconf['archive-last-run-time']))
    self.waitVMPPEvent(self.pool, vmpp, "is-archive-running=false")
    self.pool.setVMPPParam(vmpp, 'is-policy-enabled', 'false')
    #After the test of 2 hours clean the whole setup
    self.cleanup()
def _osParent_pollPowerState(self, state, timeout=600, level=xenrt.RC_FAIL, pollperiod=15):
    """Poll for reaching the specified power state.

    :param state: the power state to wait for.
    :param timeout: seconds to wait before reporting a timeout.
    :param level: xenrt result level to report on timeout.
    :param pollperiod: seconds between polls.
    """
    deadline = xenrt.timenow() + timeout
    while True:
        status = self.getPowerState()
        if state == status:
            return
        if xenrt.timenow() > deadline:
            xenrt.XRT("Timed out waiting for VM %s to be %s" % (self.name, state), level)
        # Honour the caller-supplied poll interval; this was previously
        # hard-coded to 15s, silently ignoring the pollperiod parameter.
        xenrt.sleep(pollperiod, log=False)
def poll(self, state, timeout=600, level=xenrt.RC_FAIL, pollperiod=5):
    """Poll our VM until it reports the requested state or the timeout passes."""
    expiry = xenrt.timenow() + timeout
    while True:
        if self.getState() == state:
            return
        if xenrt.timenow() > expiry:
            xenrt.XRT("Timed out waiting for VM %s to be %s" %
                      (self.name, state), level)
        time.sleep(pollperiod)
def run(self, arglist):
    """Run simultaneous netperf TCP transfers in both directions between
    the Windows and Linux guests for self.DURATION seconds, checking VM
    health throughout.

    Fails if either transfer finishes more than 5 minutes early, returns a
    non-zero exit code, or is still running 10 minutes past the expected
    end.
    """
    # Start netperf transfers in each direction
    # TCP_STREAM pushes data Windows->Linux; TCP_MAERTS pulls Linux->Windows.
    wintolin = self.winguest.xmlrpcStart("c:\\netperf.exe -H 169.254.0.2 "
                                         "-t TCP_STREAM -l %u -v 0 "
                                         "-P 0" % (self.DURATION))
    lintowin = self.winguest.xmlrpcStart("c:\\netperf.exe -H 169.254.0.2 "
                                         "-t TCP_MAERTS -l %u -v 0 "
                                         "-P 0" % (self.DURATION))
    started = xenrt.timenow()
    # Allow 5 minutes of slack either side of the nominal duration.
    shouldend = started + self.DURATION - 300
    deadline = started + self.DURATION + 600
    # Check for VM health, continued operation and completion
    wintolindata = None
    lintowindata = None
    while True:
        # Check if tests still running
        if wintolin and self.winguest.xmlrpcPoll(wintolin):
            wintolindata = self.winguest.xmlrpcLog(wintolin)
            xenrt.TEC().logverbose("Windows to Linux output:")
            xenrt.TEC().logverbose(wintolindata)
            if xenrt.timenow() < shouldend:
                raise xenrt.XRTFailure("Windows to Linux transfer ended "
                                       "early")
            rc = self.winguest.xmlrpcReturnCode(wintolin)
            if rc != 0:
                raise xenrt.XRTFailure("Windows to Linux transfer returned"
                                       " %d" % (rc))
            # Mark this direction as complete.
            wintolin = None
        if lintowin and self.winguest.xmlrpcPoll(lintowin):
            lintowindata = self.winguest.xmlrpcLog(lintowin)
            xenrt.TEC().logverbose("Linux to Windows output:")
            xenrt.TEC().logverbose(lintowindata)
            if xenrt.timenow() < shouldend:
                raise xenrt.XRTFailure("Linux to Windows transfer ended "
                                       "early")
            rc = self.winguest.xmlrpcReturnCode(lintowin)
            if rc != 0:
                raise xenrt.XRTFailure("Linux to Windows transfer returned"
                                       " %d" % (rc))
            lintowin = None
        if not wintolin and not lintowin:
            break
        # Check for timeout
        if xenrt.timenow() > deadline:
            raise xenrt.XRTFailure("Transfers still running after deadline")
        # Check VMs are OK
        xenrt.TEC().logverbose("Perform routine health checks")
        self.winguest.checkHealth()
        self.linguest.checkHealth()
        time.sleep(300)
def monitorHealth(self, frequency, duration):
    """Health-check every host and all of its guests at the given
    frequency (minutes) for the given duration (minutes)."""
    cutoff = xenrt.timenow() + (duration * 60)
    while xenrt.timenow() <= cutoff:
        for h in self.hosts:
            h.checkHealth()
            for g in self.guests[h.getName()]:
                g.checkHealth()
        time.sleep(frequency * 60)
def waitForCCP(self):
    """Wait up to 10 minutes for the CloudStack management server to
    answer API calls, then wait for every pod's hosts to report Up."""
    giveUpAt = xenrt.timenow() + 600
    pods = None
    fetched = False
    while not fetched:
        try:
            pods = self.cloud.marvin.cloudApi.listPods()
            fetched = True
        except:
            # Management server not answering yet — retry until the deadline.
            if xenrt.timenow() > giveUpAt:
                raise xenrt.XRTFailure("Cloudstack Management did not come back after 10 minutes")
            xenrt.sleep(15)
    for pod in pods:
        self.waitForHostState(podid=pod.id, state='Up', timeout=600)
def verifyNoMulUpload(self):
    """Watch the pool for one upload interval and fail if any new
    successful health-check upload appears.

    :raises xenrt.XRTFailure: if LastSuccessfulUpload changes during the
                              monitored window.
    """
    cli = self.pool.master.getCLIInstance()
    paramArgs = "uuid=%s param-name=\"%s\" param-key=\"%s\"" % (
        self.pool.getUUID(), "health-check-config", "LastSuccessfulUpload")
    lastSucUploadTs = cli.execute("pool-param-get", paramArgs, strip=True)
    xenrt.log(lastSucUploadTs)
    deadline = xenrt.timenow() + self.services[0].UPLOAD_TIMEINT_MINS * 60
    while xenrt.timenow() < deadline:
        curSucUploadTs = cli.execute("pool-param-get", paramArgs, strip=True)
        if curSucUploadTs != lastSucUploadTs:
            # Report the timestamp of the new, unexpected upload (the old
            # code logged the stale baseline timestamp here).
            raise xenrt.XRTFailure("New upload observed at timestamp %s" % curSucUploadTs)
        xenrt.log("No new upload observed as Expected")
        xenrt.sleep(120)
def monitorHealth(self,frequency,duration):
    """Health-check every host and all of its guests at the given
    frequency (minutes) for the given duration (minutes)."""
    endAt = xenrt.timenow() + duration * 60
    while xenrt.timenow() <= endAt:
        for currentHost in self.hosts:
            currentHost.checkHealth()
            for currentGuest in self.guests[currentHost.getName()]:
                currentGuest.checkHealth()
        time.sleep(frequency * 60)
def checkPathCount(self, host, disabled=False):
    """Verify the host multipath path count for every device.

    Retries up to self.ATTEMPTS times (0.5s apart) until every device
    reports the expected number of active paths; warns if it takes more
    than 2 minutes, and fails if the count never becomes correct.

    :param host: host whose multipath state is checked.
    :param disabled: if True, expect the reduced path count after paths
                     were disabled; otherwise expect the full count.
    :raises xenrt.XRTFailure: on wrong device count or persistent wrong
                              path count.
    """
    if disabled:
        # PATH_FACTOR is the fraction of paths that were disabled.
        expectedDevicePaths = self.AVAILABLE_PATHS - (self.PATH_FACTOR * self.AVAILABLE_PATHS)
        pathState = "disabling"
    else:
        expectedDevicePaths = self.AVAILABLE_PATHS
        pathState = "enabling"
    xenrt.TEC().logverbose("checkPathCount on %s after %s the path" % (host, pathState))
    deadline = xenrt.timenow() + 120 # 120 seconds
    correctPathCount = False
    for attempt in range(1, self.ATTEMPTS + 1):
        xenrt.TEC().logverbose("Finding the device paths. Attempt %s " % (attempt))
        mpaths = host.getMultipathInfo(onlyActive=True)
        if len(mpaths) != self.EXPECTED_MPATHS:
            raise xenrt.XRTFailure("Incorrect number of devices (attempt %s) "
                                   " Found (%s) Expected: %s" % ((attempt), len(mpaths), self.EXPECTED_MPATHS))
        # Active path count per SCSI ID.
        deviceMultipathCountList = [len(mpaths[scsiid]) for scsiid in mpaths.keys()]
        xenrt.TEC().logverbose("deviceMultipathCountList : %s" % str(deviceMultipathCountList))
        if not len(set(deviceMultipathCountList)) > 1: # ensures that all the entries in the list is same.
            if expectedDevicePaths in deviceMultipathCountList: # expcted paths.
                # Path count is correct; warn (but do not fail) if it took
                # longer than the 2-minute soft deadline to converge.
                if (xenrt.timenow() > deadline):
                    xenrt.TEC().warning("Time to report that all the paths have changed is more than 2 minutes")
                correctPathCount = True
                break
        xenrt.sleep(0.5)
    if not correctPathCount:
        raise xenrt.XRTFailure("Incorrect number of device paths found even after attempting %s times" % attempt)
def waitForCCP(self):
    """Wait up to 10 minutes for the CloudStack management server to
    answer API calls, then wait for every pod's hosts to report Up."""
    expiry = xenrt.timenow() + 600
    pods = None
    haveResponse = False
    while not haveResponse:
        try:
            pods = self.cloud.marvin.cloudApi.listPods()
            haveResponse = True
        except:
            # Management server still down — keep retrying until expiry.
            if xenrt.timenow() > expiry:
                raise xenrt.XRTFailure("Cloudstack Management did not come back after 10 minutes")
            xenrt.sleep(15)
    for pod in pods:
        self.waitForHostState(podid=pod.id, state='Up', timeout=600)
def waitVMPPEvent(self, pool, vmpp, condition, timeout=3600):
    """Block until xapi's event-wait reports the given VMPP condition.

    event-wait is retried in 300-second slices until the overall timeout
    (works around an event-wait bug where a single call can return
    without the condition holding).
    """
    expiry = xenrt.timenow() + timeout
    argString = " ".join(["class=vmpp", "uuid=%s" % vmpp, condition])
    cli = pool.getCLIInstance()
    # Cope with event-wait bug
    while xenrt.timenow() < expiry:
        code = cli.execute("event-wait", args=argString, timeout=300,
                           level=xenrt.RC_OK, retval="code")
        if code == 0:
            return
    raise xenrt.XRTFailure("Wait VMPP event %s timed out" % condition)
def run(self, arglist=None):
    """Check that sr_io_throughput perfmon alerts are raised while the SR
    spammer generates traffic; if none appear, force a perfmon refresh and
    re-check before failing.
    """
    # Let the srspammer run for a while and generate some traffic on the SR
    xenrt.sleep(self.VARIANCE + self.VARIANCE)
    # Hack to fool the test that the alert was enabled now
    self.origXrtTime = xenrt.timenow()
    ret = self.verifyAlertMsgs(host=self.host, alarmAutoInhibitPeriod=self.ALARMAUTOINIHIBITPERIOD, perfmon="sr_io_throughput")
    if not ret:
        # Force a read of the perfmon
        xenrt.TEC().logverbose("Trying a force read of the perfmon to see if any alerts present")
        self.host.execdom0("xe host-call-plugin host-uuid=%s plugin=perfmon fn=refresh" % self.host.uuid)
        time.sleep(60)
        ret = self.verifyAlertMsgs(host=self.host, alarmAutoInhibitPeriod=self.ALARMAUTOINIHIBITPERIOD, perfmon="sr_io_throughput", priority=self.PRIORITY)
        if not ret:
            raise xenrt.XRTFailure('No alerts raised even after the force read of perfmon %s' % self.PERFMON)
    # Non-local SR types get extra message verification.
    if not "local" in self.SRTYPE.lower():
        self.additionalVerification(host=self.host, msglist=self.MESSAGES)
def getSanDeviceForHost(self, host):
    """Wait (up to 10 minutes) for the melio volume self.guid to appear as
    an exported device on the host, and return its system device name.

    The exported-device key may or may not carry a leading underscore, so
    both forms are checked.

    :raises xenrt.XRTError: on timeout.
    """
    with self.getMelioClient(host) as melioClient:
        deadline = xenrt.timenow() + 600
        while True:
            exportedDevice = melioClient.get_all()['exported_device']
            # An empty result may not be a dict, so type-check first.
            if isinstance(exportedDevice, dict):
                if self.guid in exportedDevice.keys():
                    sanDevice = exportedDevice[self.guid]['system_name']
                    break
                elif "_%s" % self.guid in exportedDevice.keys():
                    sanDevice = exportedDevice["_%s" % self.guid]['system_name']
                    break
            if xenrt.timenow() > deadline:
                raise xenrt.XRTError("Timed out waiting for device to appear")
            xenrt.sleep(10)
    return sanDevice
def _osParent_pollPowerState(self, state, timeout=600, level=xenrt.RC_FAIL, pollperiod=15):
    """Poll for reaching the specified power state.

    :param state: the power state to wait for.
    :param timeout: seconds to wait before reporting a timeout.
    :param level: xenrt result level to report on timeout.
    :param pollperiod: seconds between polls.
    """
    deadline = xenrt.timenow() + timeout
    while True:
        status = self.getPowerState()
        if state == status:
            return
        if xenrt.timenow() > deadline:
            xenrt.XRT(
                "Timed out waiting for VM %s to be %s" % (self.name, state),
                level)
        # Honour the caller-supplied poll interval; this was previously
        # hard-coded to 15s, silently ignoring the pollperiod parameter.
        xenrt.sleep(pollperiod, log=False)
def verifyService(self, pool, abstime, timeout=300):
    """Return True if a successful health-check upload for the pool lands
    inside [abstime, abstime + upload interval + timeout], else False.

    :param pool: the pool whose LastSuccessfulUpload timestamp is polled.
    :param abstime: absolute epoch time the upload was scheduled for.
    :param timeout: extra buffer (seconds) on top of the upload interval.
    """
    #return true or False as per the upload
    cli = pool.master.getCLIInstance()
    scheduledUpload = abstime
    uploadTimeLimit = scheduledUpload + self.UPLOAD_TIMEINT_MINS * 60 + timeout
    #Between uploadtime and uploadtime+Uploadinterval+Buffer I expect upload to happen
    # NOTE(review): if uploadTimeLimit is already in the past the loop never
    # runs and the final log line would hit an unbound lastSucUploadTs —
    # callers appear to pass a future abstime; confirm.
    while xenrt.timenow() < uploadTimeLimit:
        lastSucUploadTs = cli.execute("pool-param-get", "uuid=%s param-name=\"%s\" param-key=\"%s\"" % (pool.getUUID(), "health-check-config", "LastSuccessfulUpload"), strip=True)
        xenrt.log(lastSucUploadTs)
        # Timestamp looks like "YYYY-MM-DDTHH:MM:SS[.frac]"; drop any
        # fractional part and convert (as UTC) to epoch seconds.
        lastSucUploadAbs = int(calendar.timegm(time.strptime(lastSucUploadTs.split('.')[0], "%Y-%m-%dT%H:%M:%S")))
        if lastSucUploadAbs and lastSucUploadAbs > scheduledUpload and lastSucUploadAbs < uploadTimeLimit:
            xenrt.log("Upload for pool %s successfully took place at %s" % (pool.getName(), lastSucUploadTs))
            return True
        else:
            xenrt.sleep(30)
    xenrt.log("No upload observed for pool %s during the expected timeframe" % pool.getName())
    xenrt.log("Last Successful Upload for pool %s took place at %s" % (pool.getName(), lastSucUploadTs))
    return False
def run(self,arglist=None):
    """Run randomly-chosen VM operations from self.vmoperationlist for
    self.duration seconds, collecting failures and raising at the end if
    any operation failed.
    """
    self.startTime=xenrt.timenow()
    xenrt.TEC().logverbose("Initiation time %s" %(self.startTime))
    xenrt.TEC().logverbose("Start Time = %s"%(self.host.execdom0("date")))
    self.endTime = self.duration + self.startTime
    while self.endTime > xenrt.timenow() :
        # Pick a random operation from the configured list.
        vmop = random.randint(0,len(self.vmoperationlist)-1)
        if self.runSubcase(self.vmoperationlist[vmop], (), self.guest1.distro +"_" + self.guest1.getName() ,self.vmoperationlist[vmop]) != xenrt.RESULT_PASS:
            self.failures.append("Vm Operation %s failed " %(self.vmoperationlist[vmop]))
        self.pauseandResume()
    if len(self.failures) > 0:
        raise xenrt.XRTFailure("Multiple opearations failed: %s" % self.failures)
    else :
        xenrt.TEC().logverbose("All VMOperations passed.")
def cycle(self, fallback=False): xenrt.TEC().logverbose("Power cycling machine %s" % (self.machine.name)) # Some ILO controllers have broken serial on boot if xenrt.TEC().lookupHost(self.machine.name, "SERIAL_DISABLE_ON_BOOT",False, boolean=True) and self.machine.consoleLogger: self.machine.consoleLogger.pauseLogging() # Wait a random delay to try to avoid power surges when testing # with multiple machines. if self.antiSurge: xenrt.sleep(random.randint(0, 20)) currentPower = self.getPower() if currentPower == "off" and xenrt.TEC().lookupHost(self.machine.name, "RESET_BMC", False, boolean=True): self.ipmi("mc reset cold") deadline = xenrt.timenow() + 120 while xenrt.timenow() < deadline: xenrt.sleep(10) try: self.ipmi("chassis power status") break except: pass if self.machine.consoleLogger: self.machine.consoleLogger.reload() if xenrt.TEC().lookupHost(self.machine.name, "IPMI_SET_PXE",True, boolean=True): try: self.setBootDev("pxe", True) except: xenrt.TEC().logverbose("Warning: failed to set boot dwvice to PXE") offon = xenrt.TEC().lookupHost(self.machine.name, "IPMI_RESET_UNSUPPORTED",False, boolean=True) if offon: if xenrt.TEC().lookupHost(self.machine.name, "IPMI_IGNORE_STATUS", False, boolean=True) or currentPower == "on": self.ipmi("chassis power off") xenrt.sleep(5) self.ipmi("chassis power on") else: if xenrt.TEC().lookupHost(self.machine.name, "IPMI_IGNORE_STATUS", False, boolean=True) or currentPower == "on": self.ipmi("chassis power reset") if xenrt.TEC().lookupHost(self.machine.name, "IPMI_IGNORE_STATUS", False, boolean=True): self.ipmi("chassis power on") else: self.ipmi("chassis power on") # In case the machine was hard powered off
def verifyAlertMsgs(self, host, alarmAutoInhibitPeriod="200", perfmon=PERFMON, aClass=CLASS, priority=PRIORITY):
    """Make sure that we wait for the alert repeat time interval and then check for alert listings.

    Returns True once at least two alert messages exist (so their spacing
    can be verified against alarmAutoInhibitPeriod), waiting for a second
    alert if only one has arrived; returns False if none appear.

    NOTE(review): the default alarmAutoInhibitPeriod is the string "200"
    but the body does arithmetic with it — callers appear to pass an
    integer; confirm before relying on the default.
    """
    xenrt.log("verifyAlertMsgs: The time that the alert was enabled is %s" % self.origXrtTime)
    timeDiff = xenrt.timenow() - self.origXrtTime
    temp=alarmAutoInhibitPeriod - timeDiff
    xenrt.log("verifyAlertMsgs: The time difference is %s, will need to sleep" % (temp) )
    # Allow 100 as a variance
    while (timeDiff < alarmAutoInhibitPeriod+self.VARIANCE):
        # Stop waiting once we are past the inhibit period (or the clock
        # looks inconsistent).
        if (timeDiff < 0 or alarmAutoInhibitPeriod < timeDiff):
            break
        # Allow the configured interval to complete before we check for ALARM messages
        xenrt.sleep(alarmAutoInhibitPeriod+self.VARIANCE - timeDiff)
        timeDiff = xenrt.timenow() - self.origXrtTime
        temp=alarmAutoInhibitPeriod - timeDiff
        xenrt.log("verifyAlertMsgs: Inside while: The time difference is %s, will need to sleep" % (temp))
    self.messageGet(host, aClass=aClass, perfmon=perfmon, priority=priority)
    if len(self.MESSAGES) >= 2:
        self.verifyAlertIntervals(host=host, mesages=self.MESSAGES, alarmAutoInhibitPeriod=alarmAutoInhibitPeriod)
        # Verify the difference in the timestamps to be around 5 min - 300 secs
        xenrt.TEC().logverbose("verifyAlertMessages, inside len(MESSAGES) >1: Returning true")
        return True
    elif len(self.MESSAGES) == 1:
        xenrt.log("Number of alerts available are %s" % len(self.MESSAGES))
        # Only one alert so far — wait for the next repeat, then re-fetch.
        self.waitForAlert(alarmAutoInhibitPeriod=alarmAutoInhibitPeriod)
        self.messageGet(host=host, aClass=aClass, perfmon=perfmon, priority=priority)
        # Verify the difference in the timestamps to be around 5 min - 300 secs
        self.verifyAlertIntervals(host, mesages=self.MESSAGES, alarmAutoInhibitPeriod=alarmAutoInhibitPeriod)
        xenrt.TEC().logverbose("verifyAlertMessages, inside len(MESSAGES)==1: Returning true")
        return True
    xenrt.TEC().logverbose("MessageGet: Returning False")
    return False
def run(self, arglist=None):
    """Repeatedly clone self.guest and uninstall the clone for one hour,
    exercising VM create/delete churn without exhausting storage."""
    duration = 3600 # 1 hour.
    finishtime = xenrt.timenow() + duration
    cli = self.host.getCLIInstance()
    iteration = 0
    while True:
        xenrt.TEC().progress("Starting clone iteration %s." % (iteration))
        args = []
        args.append("uuid=%s" % (self.guest.getUUID()))
        args.append("new-name-label=clone-%s" % (iteration))
        # str.join replaces the Python-2-only string.join helper.
        uuid = cli.execute("vm-clone", " ".join(args), strip=True)
        args = []
        args.append("uuid=%s" % (uuid))
        args.append("--force")
        cli.execute("vm-uninstall", " ".join(args))
        iteration = iteration + 1
        if xenrt.timenow() > finishtime:
            break
def run(self, arglist=None):
    """Reboot the host named in arglist (default RESOURCE_HOST_0), timing
    the reboot when the PERFRUN flag is set."""
    machine = arglist[0] if arglist and len(arglist) > 0 else "RESOURCE_HOST_0"
    host = xenrt.TEC().registry.hostGet(machine)
    if not host:
        raise xenrt.XRTError("Unable to find host %s in registry" % (machine))
    self.getLogsFrom(host)
    # Reboot the host
    if xenrt.TEC().lookup("PERFRUN", False, boolean=True):
        start = xenrt.timenow()
    host.reboot()
    if xenrt.TEC().lookup("PERFRUN", False, boolean=True):
        finish = xenrt.timenow()
        # Record the reboot duration as a performance value.
        xenrt.TEC().value("Reboot", finish - start, "s")
def run(self, arglist=None):
    """Reboot the target host; on PERFRUN jobs record the reboot time."""
    machine = arglist[0] if arglist and len(arglist) > 0 else "RESOURCE_HOST_0"
    host = xenrt.TEC().registry.hostGet(machine)
    if not host:
        raise xenrt.XRTError("Unable to find host %s in registry" % (machine))
    self.getLogsFrom(host)
    # Reboot the host, sampling timestamps either side on perf runs.
    if xenrt.TEC().lookup("PERFRUN", False, boolean=True):
        start = xenrt.timenow()
    host.reboot()
    if xenrt.TEC().lookup("PERFRUN", False, boolean=True):
        xenrt.TEC().value("Reboot", xenrt.timenow() - start, "s")
def verifyDeleteGzFile(self, arglist):
    #Verify gzipped files are deleted and in proper order when /var/log size goes beyond 500MB before next minute.
    # Strategy: inflate /var/log with dummy .gz files until it exceeds
    # self.MAXVARLOGSIZE (MB), wait one log-handler period, then check the
    # handler pruned enough gz files AND pruned the oldest ones first.
    deadline = xenrt.timenow() + self.TIMEOUT
    while True:
        # 'du' reports KB; divide by 1024 for MB.
        varLogSize = int(int(self.host.execdom0("du /var/log/ | tail -1").split()[0])/1024)
        if varLogSize < self.MAXVARLOGSIZE:
            xenrt.TEC().logverbose("Creating gzipped files to increase the size of /var/log/ %sMB beyond %sMB " %(varLogSize ,self.MAXVARLOGSIZE ))
            # Create 8 x 20MB dummy .gz files (160MB per pass) until the
            # size threshold is crossed.
            for i in range(8):
                self.host.execdom0(" cd /var/log/ && dd if=/dev/zero of=%s.gz bs=1M count=20" % (xenrt.randomGuestName()))
                xenrt.sleep(1)
            varLogSize = int(int(self.host.execdom0("du /var/log/ | tail -1").split()[0])/1024)
            xenrt.TEC().logverbose("Size of '/var/log' is %sMB after creating gzipped files to increase size beyond %sMB " %(varLogSize ,self.MAXVARLOGSIZE ))
        if varLogSize > self.MAXVARLOGSIZE:
            # Snapshot the gz files newest-first ('ls -t') before the log
            # handler gets a chance to prune them.
            cmd = "cd /var/log/ && ls -t -1 *.gz"
            gzFilesOld = self.host.execdom0(cmd).split("\n")[:-1]
            xenrt.TEC().logverbose("gz files ordered by time of creation before deletion by log handler %s" %(gzFilesOld))
            # Give the log handler one full period to react.
            xenrt.sleep(self.MAXTIMEPERIOD)
            varLogSize = int(int(self.host.execdom0("du /var/log/ | tail -1").split()[0])/1024)
            xenrt.TEC().logverbose(varLogSize)
            if varLogSize > self.MAXVARLOGSIZE:
                raise xenrt.XRTError("Failed to delete to compressed gz when /var/log/ size %sMB which is more than %sMB" %(varLogSize ,self.MAXVARLOGSIZE ))
            else:
                xenrt.TEC().logverbose("gz files are deleted to bring down the size of /var/log size %sMB below %sMB" %(varLogSize ,self.MAXVARLOGSIZE ))
                gzFilesNew = self.host.execdom0(cmd).split("\n")[:-1]
                xenrt.TEC().logverbose("gz files ordered by time of creation after deletion by log handler %s" %(gzFilesNew))
                # Drop the tail (oldest entries) of the old newest-first
                # listing; if the handler deleted oldest-first, what
                # remains must equal the new listing exactly.
                for i in range(len(gzFilesOld)-len(gzFilesNew)):
                    gzFilesOld.pop()
                if (gzFilesOld == gzFilesNew):
                    xenrt.TEC().logverbose("gz files are deleted in proper order by log handler %s" %(gzFilesOld))
                    break
                else:
                    raise xenrt.XRTError("Log handler Failed to delete gz files in the proper order")
        if xenrt.timenow() > deadline:
            raise xenrt.XRTFailure("Timed out while verifying the order of deletion of gz files ")
def createVirtualSwitch(self, eth):
    """Create a Hyper-V external virtual switch bound to the NIC with
    assumed index 'eth', sharing the adapter with the management OS.

    Raises xenrt.XRTError if the PowerShell script has not completed
    within 120 seconds.
    """
    # MAC is matched in Windows format (dashes, not colons).
    ps = """Import-Module Hyper-V
$ethernet = Get-NetAdapter | where {$_.MacAddress -eq "%s"}
New-VMSwitch -Name externalSwitch -NetAdapterName $ethernet.Name -AllowManagementOS $true -Notes 'Parent OS, VMs, LAN'
""" % self.getNICMACAddress(eth).replace(":","-")
    self.xmlrpcWriteFile("c:\\createvirtualswitch.ps1", ps)
    self.enablePowerShellUnrestricted()
    cmd = "powershell.exe c:\\createvirtualswitch.ps1"
    ref = self.xmlrpcStart(cmd)
    deadline = xenrt.timenow() + 120
    while True:
        try:
            if self.xmlrpcPoll(ref):
                break
        except Exception:
            # Best-effort poll: the agent may be briefly unreachable while
            # the switch takes over the NIC; keep retrying until deadline.
            # (Narrowed from a bare except so KeyboardInterrupt escapes.)
            pass
        if xenrt.timenow() > deadline:
            # BUG FIX: message was copy-pasted from the static-IP code and
            # said "Timed out setting IP to static".
            raise xenrt.XRTError("Timed out creating virtual switch")
        xenrt.sleep(5)
def createVirtualSwitch(self, eth):
    """Create an external Hyper-V switch on NIC 'eth', keeping the parent
    OS connected (-AllowManagementOS $true).

    Raises xenrt.XRTError on a 120-second timeout.
    """
    # Windows reports MAC addresses with dashes, so convert from colons.
    ps = """Import-Module Hyper-V
$ethernet = Get-NetAdapter | where {$_.MacAddress -eq "%s"}
New-VMSwitch -Name externalSwitch -NetAdapterName $ethernet.Name -AllowManagementOS $true -Notes 'Parent OS, VMs, LAN'
""" % self.getNICMACAddress(eth).replace(":", "-")
    self.xmlrpcWriteFile("c:\\createvirtualswitch.ps1", ps)
    self.enablePowerShellUnrestricted()
    cmd = "powershell.exe c:\\createvirtualswitch.ps1"
    ref = self.xmlrpcStart(cmd)
    deadline = xenrt.timenow() + 120
    while True:
        try:
            if self.xmlrpcPoll(ref):
                break
        except Exception:
            # The agent can drop off temporarily while the switch binds
            # the adapter; ignore poll errors and retry.
            pass
        if xenrt.timenow() > deadline:
            # BUG FIX: original raised "Timed out setting IP to static",
            # copy-pasted from reconfigureToStatic.
            raise xenrt.XRTError("Timed out creating virtual switch")
        xenrt.sleep(5)
def checkPathCount(self, host, disabled=False):
    """Verify the host multipath path count for every device"""
    # Work out how many paths each device should be showing.
    if disabled:
        wantedPaths = self.AVAILABLE_PATHS - (self.PATH_FACTOR * self.AVAILABLE_PATHS)
        action = "disabling"
    else:
        wantedPaths = self.AVAILABLE_PATHS
        action = "enabling"
    xenrt.TEC().logverbose("checkPathCount on %s after %s the path" % (host, action))
    deadline = xenrt.timenow() + 120  # two minutes for the counts to settle
    found = False
    for attempt in range(1, self.ATTEMPTS + 1):
        xenrt.TEC().logverbose("Finding the device paths. Attempt %s " % (attempt))
        mpaths = host.getMultipathInfo(onlyActive=True)
        if len(mpaths) != self.EXPECTED_MPATHS:
            raise xenrt.XRTFailure("Incorrect number of devices (attempt %s)  Found (%s) Expected: %s" % ((attempt), len(mpaths), self.EXPECTED_MPATHS))
        pathCounts = [len(mpaths[scsiid]) for scsiid in mpaths.keys()]
        xenrt.TEC().logverbose("deviceMultipathCountList : %s" % str(pathCounts))
        # All devices must agree on a single path count...
        if len(set(pathCounts)) <= 1:
            # ...and it must be the expected one.
            if wantedPaths in pathCounts:
                if xenrt.timenow() > deadline:
                    xenrt.TEC().warning("Time to report that all the paths have changed is more than 2 minutes")
                found = True
                break
        xenrt.sleep(0.5)
    if not found:
        raise xenrt.XRTFailure("Incorrect number of device paths found even after attempting %s times" % attempt)
def enableAlertonHost(self, host, perfmon=PERFMON, alarmLevel="4E30", alarmTriggerPeriod="60", alarmAutoInhibitPeriod="300"):
    """Configure a perfmon alert on the host via other-config:perfmon.

    Equivalent to:
    xe host-param-set other-config:
    perfmon='<config><variable><name value="network_usage" /><alarm_trigger_level value="2" />
    <alarm_trigger_period value="30" /><alarm_auto_inhibit_period value="300" /></variable></config>'
    Records the time the alert was enabled in self.origXrtTime.
    """
    # Start on a clean slate
    self.deleteAllAlarms(host)
    # Check if an alarm of the requested type is already enabled; the
    # param may simply not exist yet, in which case treat it as empty.
    try:
        enableFlag = host.getHostParam("other-config", "perfmon")
    except:
        enableFlag = ""
    if re.search(perfmon, enableFlag, re.IGNORECASE):
        self.origXrtTime = xenrt.timenow()
        return
    cmdXML = ("<config><variable>"
              "<name value='%s' />"
              "<alarm_trigger_level value='%s' />"
              "<alarm_trigger_period value='%s' />"
              "<alarm_auto_inhibit_period value='%s' />"
              "</variable></config>"
              % (perfmon, alarmLevel, alarmTriggerPeriod, alarmAutoInhibitPeriod))
    host.setHostParam("other-config:perfmon", cmdXML)
    self.origXrtTime = xenrt.timenow()
    xenrt.log("Alert enabled at time - %s" % self.origXrtTime)
    xenrt.log("Verify the other-config parameters for perfmon")
    self.verifyOtherConfigCmd(host)
def runViaDaemon(self, remote, arglist):
    """Run the prime95 torture test on the guest for 'duration' seconds
    (arglist[0], default one hour) and fail if it stops early."""
    duration = int(arglist[0]) if arglist and len(arglist) > 0 else 3600
    guest = remote
    self.storedguest = guest
    # Get a working directory on the guest and unpack the test binaries.
    workdir = guest.xmlrpcTempDir()
    guest.xmlrpcUnpackTarball("%s/%s.tgz" % (xenrt.TEC().lookup("TEST_TARBALL_BASE"), self.testname), workdir)
    # Kick off prime95 in torture-test mode.  ('ref' rather than 'id' to
    # avoid shadowing the builtin.)
    ref = guest.xmlrpcStart("cd %s\\prime95\nprime95.exe -t" % (workdir))
    started = xenrt.timenow()
    finishat = started + duration
    time.sleep(30)
    if guest.xmlrpcPoll(ref):
        raise xenrt.XRTError("prime95 did not start properly")
    # Poll every 30s until the deadline; the process exiting early is a
    # test failure.
    while xenrt.timenow() < finishat:
        if guest.xmlrpcPoll(ref):
            raise xenrt.XRTFailure("prime95 has stopped running")
        time.sleep(30)
    # Kill it and confirm it actually went away.
    guest.xmlrpcKillAll("prime95.exe")
    time.sleep(10)
    if not guest.xmlrpcPoll(ref):
        raise xenrt.XRTError("prime95 did not terminate properly")
def checkGrace(self, host):
    """Check the host holds a grace license with a plausible expiry.

    Returns False when no grace license is present; raises
    xenrt.XRTFailure when the expiry is implausibly far in the future;
    returns True otherwise.
    """
    licenseInfo = host.getLicenseDetails()
    if not 'grace' in licenseInfo['grace']:
        xenrt.TEC().logverbose('Host has not got grace license')
        return False
    expiry = xenrt.util.parseXapiTime(licenseInfo['expiry'])
    # NOTE(review): 30 * 25 * 3600 looks like it was meant to be 30 days
    # (30 * 24 * 3600); left unchanged since checkGraceFunc uses the same
    # figure -- confirm and fix both together.
    if (expiry > (xenrt.timenow() + 30 * 25 * 3600 + 1)):
        # BUG FIX: the original did 'raise xenrt.TEC().logverbose(...)',
        # which raises the None returned by logverbose (a TypeError)
        # instead of a meaningful failure.
        raise xenrt.XRTFailure(
            "Host has got license expiry date more than 30 days from current time, it has got expiry date: %s " % expiry)
    return True
def checkGraceFunc(self, host):
    """Return True if the host has a grace license whose expiry is not
    implausibly far (over ~30 days) in the future; warn and return False
    otherwise."""
    licenseInfo = host.getLicenseDetails()
    if 'grace' not in licenseInfo['grace']:
        xenrt.TEC().warning("ERROR: Host has not got grace license")
        return False
    expiry = xenrt.util.parseXapiTime(licenseInfo['expiry'])
    graceLimit = xenrt.timenow() + 30 * 25 * 3600 + 1
    if expiry > graceLimit:
        xenrt.TEC().warning("ERROR: Host has got license expiry date > 30 days from current time, it has got expiry date: %s " % expiry)
        return False
    return True
def run(self): try: self.starttime = xenrt.timenow() self.guest = xenrt.lib.xenserver.guest.createVM(\ self.host, xenrt.randomGuestName(), self.distro, memory=self.memory, vcpus=self.vcpus, vifs=xenrt.lib.xenserver.Guest.DEFAULT) #self.guest.installDrivers() except Exception, e: xenrt.TEC().logverbose("Exception while performing a VM install") traceback.print_exc(file=sys.stderr) self.exception = e
def prepare(self, arglist=None):
    """Create a generic Linux guest and drive it through a full lifecycle
    (start, reboot, suspend, resume, shutdown), recording for each
    operation a (message-name, time1, time2) window in self.events.

    Each window is widened by one second either side so the message
    timestamp generated by the operation can later be validated against
    it.  The five copy-pasted timing stanzas of the original are folded
    into one data-driven loop (identical behavior and ordering).
    """
    self.host = self.getDefaultHost()
    self.guest = self.host.createGenericLinuxGuest()
    self.uninstallOnCleanup(self.guest)
    self.events = []
    # Start from a halted guest so the first timed operation is a start.
    self.guest.shutdown()
    operations = [("VM_STARTED", self.guest.start),
                  ("VM_REBOOTED", self.guest.reboot),
                  ("VM_SUSPENDED", self.guest.suspend),
                  ("VM_RESUMED", self.guest.resume),
                  ("VM_SHUTDOWN", self.guest.shutdown)]
    for eventName, operation in operations:
        time1 = xenrt.timenow() - 1
        operation()
        time2 = xenrt.timenow() + 1
        self.events.append((eventName, time1, time2))
        # Settle between operations so events do not overlap.
        xenrt.sleep(30)
def verifyNoMulUpload(self):
    """Verify no further health-check upload occurs within one upload
    interval: poll LastSuccessfulUpload and fail if it ever changes.

    Raises xenrt.XRTFailure when a new upload is observed.
    """
    cli = self.pool.master.getCLIInstance()

    def _lastUpload():
        # Read the LastSuccessfulUpload timestamp from the pool's
        # health-check-config map (deduplicates the original's two
        # identical CLI invocations).
        return cli.execute(
            "pool-param-get",
            "uuid=%s param-name=\"%s\" param-key=\"%s\"" %
            (self.pool.getUUID(), "health-check-config", "LastSuccessfulUpload"),
            strip=True)

    lastSucUploadTs = _lastUpload()
    xenrt.log(lastSucUploadTs)
    # Hoist the loop-invariant deadline: the original recomputed
    # 'now + interval' from a fixed 'now' on every pass.
    deadline = xenrt.timenow() + self.services[0].UPLOAD_TIMEINT_MINS * 60
    while xenrt.timenow() < deadline:
        curSucUploadTs = _lastUpload()
        if curSucUploadTs == lastSucUploadTs:
            xenrt.log("No new upload observed as Expected")
            xenrt.sleep(120)
        else:
            # BUG FIX: report the newly observed timestamp; the original
            # logged the stale 'lastSucUploadTs' value.
            raise xenrt.XRTFailure("New upload observed at timestamp %s" % curSucUploadTs)