def resume(self, on=None):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.suspended,
                    "Power state before resume must be suspended")
    self.toolstack.resumeInstance(self, on)
    self.os.waitForBoot(60)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.up,
                   "Power state after resume should be up")

def prepare(self, arglist):
    self.cloud = self.getDefaultToolstack()
    self.instances = []
    zones = self.cloud.marvin.cloudApi.listZones()
    xenrt.xrtAssert(len(zones) == 1, 'There must be 1 and only 1 zone configured for this test-case')
    self.zoneid = zones[0].id
    pods = self.cloud.marvin.cloudApi.listPods()
    xenrt.xrtAssert(len(pods) == 1, 'There must be 1 and only 1 pod configured for this test-case')
    self.podid = pods[0].id
    if self.VERIFY_USER_INSTANCES:
        distros = ['centos59_x86-32', 'win7sp1-x86']
        self.instances = self.createInstances(zoneName=zones[0].name, distroList=distros, instancesPerDistro=1)
        map(lambda x: x.assertHealthy(), self.instances)
    # Check all System VMs are ok before the test is run
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=60)
    args = self.parseArgsKeyValue(arglist)
    storageVMName = args.has_key('storageVM') and args['storageVM'] or None
    self.storageVM = xenrt.TEC().registry.guestGet(storageVMName)
    self.storageVM.checkHealth()

def diffZoneCapacityWithCurrent(self, zoneid, oldCapacityData=[], capacityTypeIdList=None):
    """Compare CCP reported capacity data with a previous reading.
    When called with just zoneid specified this just returns the current capacity data."""
    newCapacityData = self.cloud.marvin.cloudApi.listCapacity(zoneid=zoneid)
    capacityDataChanged = False
    for oldCapacity in oldCapacityData:
        newCapacity = filter(lambda x: x.type == oldCapacity.type, newCapacityData)
        xenrt.xrtAssert(len(newCapacity) == 1, 'Inconsistent capacity data reported by MS')
        if oldCapacity.__dict__ != newCapacity[0].__dict__:
            diffLogStr = 'DIFF IGNORED: '
            if capacityTypeIdList == None or oldCapacity.type in capacityTypeIdList:
                diffLogStr = 'DIFF DETECTED: '
                capacityDataChanged = True
            xenrt.TEC().logverbose(diffLogStr + 'TYPE: %d: Old capacity: %s, new capacity: %s' %
                                   (oldCapacity.type, pformat(oldCapacity), pformat(newCapacity[0])))
    return (capacityDataChanged, newCapacityData)

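# A hedged usage sketch (not in the original) of diffZoneCapacityWithCurrent():
# take a baseline reading, perform an operation, then diff against it while
# only flagging the listed capacity types. _doSomeOperation() is a
# hypothetical stand-in; the baseline call mirrors the one made in
# verifySystemVMsRecoverFromOutage() below.
def _exampleCapacityDiffUsage(self):
    (ignore, baseline) = self.diffZoneCapacityWithCurrent(zoneid=self.zoneid)
    self._doSomeOperation()
    (changed, current) = self.diffZoneCapacityWithCurrent(
        zoneid=self.zoneid, oldCapacityData=baseline, capacityTypeIdList=[4, 8])
    if changed:
        raise xenrt.XRTFailure('Capacity changed unexpectedly')
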
def prepare(self, arglist):
    self.templates = []
    self.instances = []
    self.cloud = self.getDefaultToolstack()
    args = self.parseArgsKeyValue(arglist)
    self.noUpdate = args.has_key('noupdate') and args['noupdate'] == 'true'
    zones = self.cloud.marvin.cloudApi.listZones()
    xenrt.xrtAssert(len(zones) == 1, 'There must be 1 and only 1 zone configured for this test-case')
    self.zoneid = zones[0].id
    clusters = self.cloud.marvin.cloudApi.listClusters(zoneid=self.zoneid)
    xenrt.xrtAssert(len(clusters) == 1, 'There must be 1 and only 1 cluster configured for this test-case')
    self.cluster = clusters[0]
    existingTemplates = self.cloud.marvin.cloudApi.listTemplates(templatefilter='all', zoneid=self.zoneid)
    self.templates = filter(lambda x: x.templatetype != 'SYSTEM' and x.templatetype != 'BUILTIN', existingTemplates)
    hostList = self.cloud.marvin.cloudApi.listHosts(clusterid=self.cluster.id, type='Routing')
    xenrt.TEC().logverbose('Updating hosts %s' % (pformat(map(lambda x: x.name, hostList))))
    instancesPerTemplate = ((len(hostList) - 1) * 3) + 2
    xenrt.TEC().logverbose('Creating %d instances per template in list %s' %
                           (instancesPerTemplate, pformat(map(lambda x: x.name, self.templates))))
    self._logCapacity()
    # Create instances
    for template in self.templates:
        self.instances += map(lambda x: self.cloud.createInstanceFromTemplate(
            templateName=template.name,
            name='preUp-%s-%d' % (self._getTemplateNameStr(template), x)), range(instancesPerTemplate))
    xenrt.TEC().logverbose('Created the following instances: %s' % (pformat(map(lambda x: x.name, self.instances))))
    self._logCapacity()

def stop(self, force=False, osInitiated=False):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.up,
                    "Power state before shutting down must be up")
    if osInitiated:
        self.os.shutdown()
        self._osParent_pollPowerState(xenrt.PowerState.down)
    else:
        self.toolstack.stopInstance(self, force)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.down,
                   "Power state after shutdown should be down")

def managementIp(self):
    if not self.__managementIp:
        mgmtIpData = filter(lambda x: 'NetScaler IP' in x, self.cli('show ns ip'))
        xenrt.xrtAssert(len(mgmtIpData) == 1, 'The NetScaler only has one management interface defined')
        managementIp = re.search(r'(\d{1,3}\.){3}\d{1,3}', mgmtIpData[0]).group(0)
        xenrt.xrtAssert(managementIp == self.__vpxGuest.mainip,
                        'The IP address of the guest matches the reported NetScaler management IP address')
        self.__managementIp = managementIp
    return self.__managementIp

def __init__(self, name, toolstack, hypervisorHosts):
    self.cloud = toolstack
    self.hypervisorHosts = hypervisorHosts
    self.name = name
    systemVmData = self.cloud.marvin.cloudApi.listSystemVms(name=self.name)
    xenrt.xrtAssert(len(systemVmData) == 1, 'System VM with name %s not found' % (name))

def reboot(self, force=False, timeout=600, osInitiated=False):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.up,
                    "Power state before rebooting must be up")
    if osInitiated:
        self.os.reboot()
        # Give the in-guest reboot time to begin before polling for boot completion
        xenrt.sleep(120)
    else:
        self.toolstack.rebootInstance(self, force)
    self.os.waitForBoot(timeout)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.up,
                   "Power state after reboot should be up")

def _doResiliencyTest(self, arglist):
    args = self.parseArgsKeyValue(arglist)
    xenrt.xrtAssert(args.has_key('systemvmoperation'),
                    'TCSystemVMOpsResiliency requires a systemvmoperation argument')
    xenrt.xrtAssert(hasattr(self.systemVm, args['systemvmoperation']),
                    'systemvmoperation must be a valid method on the SystemVM object')
    getattr(self.systemVm, args['systemvmoperation'])()

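# A minimal sketch (not from the original) of how _doResiliencyTest() is
# driven: XenRT sequence arglists are 'key=value' strings, and 'reboot' is
# one of the SystemVM lifecycle methods defined in this module.
def _exampleResiliencyRun(self):
    self._doResiliencyTest(['systemvmoperation=reboot'])
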
def isLicensed(self, feature=None):
    if not feature:
        # Use LB as default
        feature = 'Load Balancing'
    licData = filter(lambda x: x.startswith(feature), self.cli('show ns license'))
    xenrt.xrtAssert(len(licData) == 1, 'There is an entry for the specified feature in the NS license data')
    licensed = licData[0].split(':')[1].strip() == 'YES'
    xenrt.TEC().logverbose('NetScaler feature: %s license state = %s' % (feature, licensed))
    return licensed

def applyLicense(self, localLicensePath):
    """Copy the specified license file to the VPX and apply it with a reboot"""
    xenrt.xrtAssert(self.__vpxGuest.getState() == 'UP', 'NetScaler license can only be applied on a running VPX')
    sftp = self.__vpxGuest.sftpClient(username='******')
    sftp.copyTo(localLicensePath, os.path.join('/nsconfig/license', os.path.basename(localLicensePath)))
    sftp.close()
    self.reboot()
    xenrt.xrtAssert(self.isLicensed(), 'NetScaler reports being licensed after license file is applied')

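# Hedged usage sketch (not from the original): apply a license file and then
# confirm a specific feature is reported licensed; the path is a placeholder.
def _exampleApplyLicense(self):
    self.applyLicense('/tmp/netscaler.lic')
    xenrt.xrtAssert(self.isLicensed('Load Balancing'),
                    'Load Balancing should be licensed after applying the file')
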
def reboot(self):
    xenrt.xrtAssert(self.__vpxGuest.getState() == 'UP', 'NetScaler VPX reboot can only be done on a running VPX')
    self.__vpxGuest.lifecycleOperation('vm-reboot')
    # Wait / Check for SSH connectivity
    self.__vpxGuest.waitForSSH(timeout=300, username='******', cmd='shell')
    # On ESX host VM comes up and goes down for around a minute
    if isinstance(self.__vpxGuest, xenrt.lib.esx.Guest):
        xenrt.sleep(60)
        self.__vpxGuest.waitForSSH(timeout=120, username='******', cmd='shell')

def cli(self, command, level=xenrt.RC_FAIL):
    """Helper method for creating specific NetScaler CLI command methods"""
    xenrt.xrtAssert(self.__vpxGuest.getState() == 'UP', 'NetScaler CLI Commands can only be executed on a running VPX')
    data = self.__vpxGuest.execguest(command, username='******', password='******', level=level)
    if isinstance(data, int):
        # execguest returned a return code rather than command output
        return
    data = map(lambda x: x.strip(), filter(lambda x: not x.startswith(' Done'), data.splitlines()))
    xenrt.TEC().logverbose('NetScaler Command [%s] - Returned: %s' % (command, '\n'.join(data)))
    return data

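# A minimal sketch (not part of the original class) of the kind of
# command-specific helper cli() is intended to support; 'show ns version' is
# a standard NetScaler CLI command, but this wrapper is an assumption.
def getVersion(self):
    return self.cli('show ns version')
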
def migrate(self, to, live=True):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.up,
                    "Power state before migrate must be up")
    self.toolstack.migrateInstance(self, to, live)
    self.os.waitForBoot(60)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.up,
                   "Power state after migrate should be up")
    xenrt.xrtCheck(self.residentOn == to,
                   "Resident on after migrate should be %s" % to)

def osFromExisting(parent, password=None):
    detectionState = DetectionState(password)
    for o in oslist:
        try:
            ret = o.runDetect(parent, detectionState)
        except OSNotDetected:
            continue
        else:
            xenrt.xrtAssert(ret, "No object returned for detected OS")
            return ret
    raise xenrt.XRTError("Could not determine OS")

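# Hedged usage sketch (not from the original): detect the OS on an
# already-installed guest and wait for it to respond; 'guest' and the
# password value are hypothetical placeholders.
def exampleDetectExistingOS(guest):
    detectedOS = osFromExisting(guest, password='xenroot')
    detectedOS.waitForBoot(600)
    return detectedOS
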
def _logCapacity(self):
    zones = self.cloud.marvin.cloudApi.listZones(id=self.zoneid)
    xenrt.xrtAssert(len(zones) == 1, 'There must be 1 and only 1 zone for the stored zone ID')
    # CCP capacity type 8 = direct-attached (shared network) IPs for Basic
    # zones; type 4 = public IPs for Advanced zones
    capacityTypeId = 8
    if zones[0].networktype == 'Advanced':
        capacityTypeId = 4
    capacityList = self.cloud.marvin.cloudApi.listCapacity(zoneid=self.zoneid, type=capacityTypeId)
    if capacityList == None or len(capacityList) != 1:
        xenrt.TEC().logverbose('Unable to read CCP capacity')
        return
    xenrt.TEC().logverbose('CCP Address Capacity - Total: %d, Used: %d' %
                           (capacityList[0].capacitytotal, capacityList[0].capacityused))

def _populateParam(self):
    self.cloud = self.getDefaultToolstack()
    self._cloudApi = self.cloud.cloudApi
    self._hypervisors = self.cloud.getAllHypervisors()
    self._systemVMs = self._cloudApi.listSystemVms()
    self._clusters = self._cloudApi.listClusters()
    self._pods = self._cloudApi.listPods()
    xenrt.xrtAssert(len(self._pods) == 1, 'There must be 1 and only 1 pod configured for this test-case')
    self._hostsInClusters = []
    for cluster in self._clusters:
        self._hostsInClusters.append(self.cloud.getAllHostInClusterByClusterId(cluster.id))

def prepare(self, arglist):
    args = self.parseArgsKeyValue(arglist)
    self.cloud = self.getDefaultToolstack()
    pods = self.cloud.marvin.cloudApi.listPods()
    xenrt.xrtAssert(len(pods) == 1, 'There must be 1 and only 1 pod configured for this test-case')
    systemVmType = 'secondarystoragevm'
    if args.has_key('systemvmtype'):
        systemVmType = args['systemvmtype']
    self.systemVm = SystemVM.systemVMFactory(self.cloud, self.getAllHosts(),
                                             podid=pods[0].id, systemvmtype=systemVmType)[0]
    # Check that the system VM is healthy before the test
    self.systemVm.waitForReady()

def waitForUserInstanceState(self, instanceNames, state, timeout=300, pollPeriod=20):
    """Wait for all specified User Instances to reach the specified state"""
    xenrt.xrtAssert(len(instanceNames) > 0, 'No instance names specified in call to waitForUserInstanceState')
    allInstancesReachedState = False
    startTime = datetime.now()
    while (datetime.now() - startTime).seconds < timeout:
        instanceData = filter(lambda x: x.name in instanceNames,
                              self.cloud.marvin.cloudApi.listVirtualMachines())
        xenrt.xrtAssert(len(instanceNames) == len(instanceData),
                        'Did not find instance records for all specified instance names')
        instancesNotInState = filter(lambda x: x.state != state, instanceData)
        if len(instancesNotInState) == 0:
            allInstancesReachedState = True
            break
        else:
            xenrt.TEC().logverbose('Waiting for the following User Instances to reach state %s: %s' %
                                   (state, pformat(map(lambda x: (x.name, x.state), instancesNotInState))))
            xenrt.sleep(pollPeriod)
    if not allInstancesReachedState:
        raise xenrt.XRTFailure('Not all User Instances reached state %s in %d seconds' % (state, timeout))

def verifySystemVMsRecoverFromOutage(self):
    systemVms = self.cloud.marvin.cloudApi.listSystemVms(podid=self.podid)
    cpvms = filter(lambda x: x.systemvmtype == 'consoleproxy', systemVms)
    ssvms = filter(lambda x: x.systemvmtype == 'secondarystoragevm', systemVms)
    xenrt.xrtAssert(len(cpvms) + len(ssvms) == len(systemVms), 'Unexpected System VM type reported')
    (ignore, originalCapacity) = self.diffZoneCapacityWithCurrent(zoneid=self.zoneid)
    # Stop the storage VM (simulate the outage)
    self.storageVM.shutdown(force=True)
    # Wait for the system VMs to exit the Up state
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=600, exitState=True)
    self.storageVM.start()
    # Wait for the system VMs to recover
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=1200)
    # Check the number of system VMs has not changed
    newsystemVms = self.cloud.marvin.cloudApi.listSystemVms(podid=self.podid)
    newcpvms = filter(lambda x: x.systemvmtype == 'consoleproxy', newsystemVms)
    newssvms = filter(lambda x: x.systemvmtype == 'secondarystoragevm', newsystemVms)
    xenrt.xrtAssert(len(newcpvms) + len(newssvms) == len(newsystemVms), 'Unexpected System VM type reported')
    xenrt.xrtAssert(len(cpvms) == len(newcpvms), 'Number of Console Proxy VMs not the same after outage')
    xenrt.xrtAssert(len(ssvms) == len(newssvms), 'Number of Secondary Storage VMs not the same after outage')
    # Check all Hosts are Up and then recheck that all System VMs are still up
    self.waitForHostState(self.podid, state='Up', timeout=600)
    self.waitForSystemVmAgentState(self.podid, state='Up', timeout=300)
    if self.VERIFY_USER_INSTANCES:
        self.waitForUserInstanceState(instanceNames=map(lambda x: x.name, self.instances),
                                      state='Running', timeout=300)
        for instance in self.instances:
            try:
                instance.assertHealthy()
            except Exception, e:
                # VMs may fail when their disks are removed - reboot any VMs that are not responding
                xenrt.TEC().logverbose('Instance: %s health check failed with: %s' % (instance.name, str(e)))
                xenrt.TEC().logverbose('Reboot instance: %s' % (instance.name))
                instance.reboot()
        self.waitForUserInstanceState(instanceNames=map(lambda x: x.name, self.instances),
                                      state='Running', timeout=300)
        map(lambda x: x.assertHealthy(), self.instances)
    # Verify that all VRs are running
    nonRunningVRs = filter(lambda x: x.state != 'Running',
                           self.cloud.marvin.cloudApi.listRouters(listall='true'))
    if len(nonRunningVRs) > 0:
        xenrt.TEC().logverbose('VRs not in Running state: %s' %
                               (pformat(map(lambda x: (x.name, x.state), nonRunningVRs))))
        raise xenrt.XRTFailure('VR(s) not recovered after Primary Storage Outage')

def suspend(self):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.up,
                    "Power state before suspend must be up")
    self.toolstack.suspendInstance(self)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.suspended,
                   "Power state after suspend should be suspended")

def toolstackStart(self, on=None):
    xenrt.xrtAssert(self.getPowerState() == xenrt.PowerState.down,
                    "Power state before starting must be down")
    self.toolstack.startInstance(self, on)
    xenrt.xrtCheck(self.getPowerState() == xenrt.PowerState.up,
                   "Power state after start should be up")

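# A hedged end-to-end sketch (not from the original) exercising the lifecycle
# helpers above; 'instance' is a hypothetical Instance object and 'targetHost'
# a hypothetical migration destination. Each helper asserts the power state
# before and after the transition, so the ordering matters.
def exampleLifecycle(instance, targetHost):
    instance.stop(osInitiated=True)
    instance.toolstackStart()
    instance.suspend()
    instance.resume()
    instance.migrate(targetHost, live=True)
    instance.reboot()
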
def getSystemVMData(self):
    systemVmData = self.cloud.marvin.cloudApi.listSystemVms(name=self.name)
    xenrt.xrtAssert(len(systemVmData) == 1, 'System VM with name %s not found' % (self.name))
    return systemVmData[0]

def getManSvrVMData(self):
    hostData = self.cloud.marvin.cloudApi.listHosts(name=self.name)
    xenrt.xrtAssert(len(hostData) == 1, 'ManSvr Host record with name %s not found' % (self.name))
    return hostData[0]
