def runViaDaemon(self, remote, arglist):
    """Run the VideoWin OneWin.exe benchmark on a Windows guest via the
    XML-RPC execution daemon and collect per-resolution results.

    remote  -- guest object exposing the xmlrpc* daemon interface
    arglist -- unused in this block (standard testcase signature)

    Results accumulate in a dict keyed "<w>x<h>B<bpp>_<metric>" mapping
    to a list of float samples.
    NOTE(review): the dict is local and not returned/stored within this
    block -- presumably consumed by code outside this view; confirm.
    """
    # Extract OneWin.exe from the test tarball and copy it to the VM.
    d = xenrt.TEC().tempDir()
    xenrt.getTestTarball("videowin", extract=True, copy=False, directory=d)
    xenrt.command("unzip %s/videowin/videowin.zip VideoWin/OneWin.exe "
                  "-d %s" % (d, d))
    workdir = remote.xmlrpcTempDir()
    remote.xmlrpcSendFile("%s/VideoWin/OneWin.exe" % (d),
                          "%s\\OneWin.exe" % (workdir))
    results = {}
    for iteration in range(9):
        xenrt.TEC().logdelimit("Iteration %u" % (iteration))
        # Remove any stale log from a previous iteration (best-effort).
        try:
            remote.xmlrpcRemoveFile("%s\\VideoLog.txt" % (workdir))
        except Exception:
            pass
        # Run the test on the current video mode.
        try:
            remote.xmlrpcExec("cd %s\n%s\\OneWin.exe RUN" % (workdir, workdir),
                              timeout=600)
        except xenrt.XRTFailure as e:
            # Returns non-zero for some reason; treat as benign and log it.
            xenrt.TEC().logverbose("OneWin.exe returned non-zero: %s" %
                                   (str(e)))
        # Fetch the log and archive a copy in the job log directory.
        data = str(remote.xmlrpcReadFile("%s\\VideoLog.txt" % (workdir)))
        with open("%s/VideoLog-%u.txt" %
                  (xenrt.TEC().getLogdir(), iteration), "w") as f:
            f.write(data)
        # Find the header line: "Resolution <metric> <metric> ...".
        lines = data.splitlines()
        headerindex = None
        headers = []
        for i, rawline in enumerate(lines):
            ll = rawline.strip().split()
            if len(ll) > 0 and ll[0] == "Resolution":
                headerindex = i
                headers = ll[1:]
                break
        if headerindex is None:
            raise xenrt.XRTError("Could not find result header line")
        # Walk all lines after the header looking for result rows:
        # <width> <height> <bpp> followed by one value per header column.
        for i in range(headerindex + 1, len(lines)):
            ll = lines[i].strip().split()
            if len(ll) == len(headers) + 3:
                resolution = "%sx%sB%s" % (ll[0], ll[1], ll[2])
                for j in range(len(headers)):
                    h = "%s_%s" % (resolution, headers[j])
                    v = float(ll[j + 3])
                    results.setdefault(h, []).append(v)
def checkDisk(self, disk):
    """Check the supplied disk (abstract hook).

    This base implementation always raises; subclasses must override.

    :raises xenrt.XRTError: always.
    """
    raise xenrt.XRTError("Unimplemented")
def prepare(self, arglist=[]):
    """Prepare two VDIs and dom0 helper scripts for the VDI copy test.

    Creates a source and a destination VDI, writes three shell scripts
    (make_key.sh / copy_key.sh / find_key.sh) into dom0, stamps a
    "secret key" marker file onto the source VDI, images the source VDI
    back to the controller, and records the destination VDI's database
    _ref from a pool database dump.

    arglist -- optional "key=value" strings; "vdisize" overrides
               self.VDISIZE.

    :raises xenrt.XRTError: if the key cannot be verified on the VDI.
    """
    args = xenrt.util.strlistToDict(arglist)
    self.VDISIZE = int(args.get("vdisize") or self.VDISIZE)
    self.host = self.getDefaultHost()
    local_dir = xenrt.TEC().tempDir()    # on the controller
    remote_dir = self.host.tempDir()     # in dom0
    mount_dir = self.host.tempDir()      # dom0 mount point for the VDI
    vdi_name = "vdi" + str(int(time.time()))
    self.vdi_from = self.host.createVDI(self.VDISIZE,
                                        name=vdi_name + "_from")
    self.vdi_to = self.host.createVDI(self.VDISIZE,
                                      name=vdi_name + "_to")
    # Dump the pool database so we can later look up the target VDI ref.
    db_name = "db" + str(int(time.time()))
    db_tmp = os.path.join(remote_dir, db_name + ".db")
    db_xml = os.path.join(local_dir, db_name + ".db")
    self.host.execdom0("xe pool-dump-database file-name=%s" % db_tmp)
    vdi_key = os.path.join(mount_dir, vdi_name + ".key")
    self.vdi_img_host = os.path.join(remote_dir, vdi_name + ".img")
    self.vdi_img_controller = os.path.join(local_dir, vdi_name + ".img")
    # make_key.sh: format the attached VDI and drop a marker file on it.
    make_key_name = "make_key.sh"
    make_key_cmd = """#!/bin/sh
mkfs -t ext3 /dev/${DEVICE}
mount /dev/${DEVICE} %s
touch %s
umount /dev/${DEVICE}""" % (mount_dir, vdi_key)
    make_key_tmp = os.path.join(local_dir, make_key_name)
    # Use open() + with for all three scripts (original mixed file()/open()
    # without closing guarantees).
    with open(make_key_tmp, "w") as fd:
        fd.write(make_key_cmd)
    self.make_key_sh = os.path.join(remote_dir, make_key_name)
    # copy_key.sh: image the raw VDI device to a file in dom0.
    copy_key_name = "copy_key.sh"
    copy_key_cmd = """#!/bin/sh
dd if=/dev/${DEVICE} of=%s""" % (self.vdi_img_host)
    copy_key_tmp = os.path.join(local_dir, copy_key_name)
    with open(copy_key_tmp, "w") as fd:
        fd.write(copy_key_cmd)
    self.copy_key_sh = os.path.join(remote_dir, copy_key_name)
    # find_key.sh: mount the VDI and exit self.ERRORCODE when the marker
    # file is missing, 0 when present.
    find_key_name = "find_key.sh"
    find_key_cmd = """#!/bin/sh
mount /dev/${DEVICE} %s
KEY=%s
if [ -e ${KEY} ]; then echo Found key ${KEY}; FLAG=0; else echo Missing key ${KEY}; FLAG=%s; fi
umount /dev/${DEVICE}
exit $FLAG""" % (mount_dir, vdi_key, self.ERRORCODE)
    find_key_tmp = os.path.join(local_dir, find_key_name)
    with open(find_key_tmp, "w") as fd:
        fd.write(find_key_cmd)
    self.find_key_sh = os.path.join(remote_dir, find_key_name)
    # Copy the scripts into dom0 and make them executable.
    sftp = self.host.sftpClient()
    try:
        sftp.copyTo(make_key_tmp, self.make_key_sh)
        sftp.copyTo(find_key_tmp, self.find_key_sh)
        sftp.copyTo(copy_key_tmp, self.copy_key_sh)
    finally:
        sftp.close()
    self.host.execdom0("chmod a+x " + self.make_key_sh)
    self.host.execdom0("chmod a+x " + self.find_key_sh)
    self.host.execdom0("chmod a+x " + self.copy_key_sh)
    # Stamp the key onto the source VDI and verify it took.
    self.host.execdom0("/opt/xensource/debug/with-vdi %s %s" %
                       (self.vdi_from, self.make_key_sh))
    if not self.findKey(self.vdi_from):
        raise xenrt.XRTError("Secret key was not found on the VDI")
    # Image the source VDI, then pull the image and DB dump back here.
    self.host.execdom0("/opt/xensource/debug/with-vdi %s %s" %
                       (self.vdi_from, self.copy_key_sh))
    sftp = self.host.sftpClient()
    try:
        sftp.copyFrom(db_tmp, db_xml)
        sftp.copyFrom(self.vdi_img_host, self.vdi_img_controller)
    finally:
        sftp.close()
    # Locate the destination VDI's opaque _ref in the DB dump.
    db_dom = xml.dom.minidom.parse(db_xml)
    vdi_table = [x for x in db_dom.getElementsByTagName('table')
                 if x.attributes['name'].value == 'VDI'][0]
    vdi_entry = [x for x in vdi_table.getElementsByTagName('row')
                 if x.attributes['uuid'].value == self.vdi_to][0]
    self.vdi_ref = vdi_entry.attributes['_ref'].value
def setupManagementServer(self):
    """Perform post-install setup of the CloudStack management server(s).

    Opens the integration API port, runs the vendor setup script,
    points built-in template URLs at the local distfiles mirror,
    configures Marvin global settings, seeds one secondary storage
    location, optionally enables code coverage, and verifies the
    build commit when one can be identified.
    """
    # Allow the (unauthenticated) integration API port through the firewall.
    self.primaryManagementServer.execcmd(
        'iptables -I INPUT -p tcp --dport 8096 -j ACCEPT')
    # Locate and run the product's setup-management script.
    setupMsLoc = self.primaryManagementServer.execcmd(
        'find /usr/bin -name %s-setup-management' % (self.cmdPrefix)).strip()
    self.primaryManagementServer.execcmd(setupMsLoc)
    # Enable the integration API on port 8096 in the CloudStack DB.
    self.primaryManagementServer.execcmd(
        'mysql -u cloud --password=cloud -h %s --execute="UPDATE cloud.configuration SET value=8096 WHERE name=\'integration.api.port\'"' % self.dbServer.getIP())
    if xenrt.TEC().lookup("USE_CCP_SIMULATOR", False, boolean=True) or self._simDbServer:
        # For some reason the cloud user doesn't seem to have access to the simulator DB
        self.primaryManagementServer.execcmd(
            """sed -i s/db.simulator.username=cloud/db.simulator.username=root/ /usr/share/cloudstack-management/conf/db.properties""")
        self.primaryManagementServer.execcmd(
            """sed -i s/db.simulator.password=cloud/db.simulator.password=xensource/ /usr/share/cloudstack-management/conf/db.properties""")
        self.primaryManagementServer.execcmd(
            """sed -i s/db.simulator.host=localhost/db.simulator.host=%s/ /usr/share/cloudstack-management/conf/db.properties""" % self.simDbServer.getIP())
    self.restart(checkHealth=False)
    self.checkManagementServerHealth(timeout=300)
    # We have to update templates *after* starting the management server as
    # some templates are not introduced until DB schema updates are applied
    templateSubsts = {
        "http://download.cloud.com/templates/builtin/centos56-x86_64.vhd.bz2":
            "%s/cloudTemplates/centos56-x86_64.vhd.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://download.cloud.com/releases/4.3/centos6_4_64bit.vhd.bz2":
            "%s/cloudTemplates/centos6_4_64bit.vhd.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://nfs1.lab.vmops.com/templates/centos53-x86_64/latest/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2":
            "%s/cloudTemplates/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://download.cloud.com/templates/builtin/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2":
            "%s/cloudTemplates/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://download.cloud.com/releases/2.2.0/CentOS5.3-x86_64.ova":
            "%s/cloudTemplates/CentOS5.3-x86_64.ova" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://download.cloud.com/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2":
            "%s/cloudTemplates/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP"),
        "http://download.cloud.com/templates/builtin/centos-7-x86_64.tar.gz":
            "%s/cloudTemplates/centos-7-x86_64.tar.gz" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP")
    }
    if xenrt.TEC().lookup("MARVIN_BUILTIN_TEMPLATES", False, boolean=True):
        # Substitute httpd-enabled variants when Marvin built-in templates
        # are requested.
        templateSubsts["http://download.cloud.com/templates/builtin/centos56-x86_64.vhd.bz2"] = \
            "%s/cloudTemplates/centos56-httpd-64bit.vhd.bz2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP")
        templateSubsts["http://download.cloud.com/releases/2.2.0/CentOS5.3-x86_64.ova"] = \
            "%s/cloudTemplates/centos53-httpd-64bit.ova" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP")
        templateSubsts["http://download.cloud.com/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2"] = \
            "%s/cloudTemplates/centos55-httpd-64bit.qcow2" % xenrt.TEC().lookup("EXPORT_DISTFILES_HTTP")
    for t in templateSubsts.keys():
        self.primaryManagementServer.execcmd(
            """mysql -u cloud --password=cloud -h %s --execute="UPDATE cloud.vm_template SET url='%s' WHERE url='%s'" """ % (self.dbServer.getIP(), templateSubsts[t], t))
    self.restart()
    marvinApi = xenrt.lib.cloud.MarvinApi(self)
    # The internal NPRI subnet is allowed for secondary-storage downloads.
    internalMask = IPy.IP("%s/%s" % (xenrt.getNetworkParam("NPRI", "SUBNET"),
                                     xenrt.getNetworkParam("NPRI", "SUBNETMASK")))
    if xenrt.TEC().lookup("USE_CCP_SIMULATOR", False, boolean=True) or self._simDbServer:
        self.primaryManagementServer.execcmd(
            'mysql -u root --password=xensource -h %s < /usr/share/cloudstack-management/setup/hypervisor_capabilities.simulator.sql' % self.dbServer.getIP())
        self.primaryManagementServer.execcmd(
            'mysql -u root --password=xensource -h %s < /usr/share/cloudstack-management/setup/templates.simulator.sql' % self.dbServer.getIP())
    marvinApi.setCloudGlobalConfig("secstorage.allowed.internal.sites",
                                   internalMask.strNormal())
    if not xenrt.TEC().lookup("MARVIN_SETUP", False, boolean=True):
        marvinApi.setCloudGlobalConfig("use.external.dns", "true")
    endpoint_url = "http://%s:8096/client/api" % marvinApi.mgtSvrDetails.mgtSvrIp
    if self.additionalManagementServers:
        # Multiple management servers: enable agent load-balancing and
        # point agents at the integration endpoint.
        marvinApi.setCloudGlobalConfig("agent.lb.enabled", "true")
        marvinApi.setCloudGlobalConfig("endpointe.url", endpoint_url)
    marvinApi.setCloudGlobalConfig("check.pod.cidrs", "false",
                                   restartManagementServer=True)
    xenrt.GEC().dbconnect.jobUpdate("CLOUD_MGMT_SVR_IP",
                                    self.primaryManagementServer.getIP())
    xenrt.TEC().registry.toolstackPut(
        "cloud", xenrt.lib.cloud.CloudStack(place=self.primaryManagementServer))
    # Create one secondary storage, to speed up deployment.
    # Additional locations will need to be created during deployment
    hvlist = xenrt.TEC().lookup("CLOUD_REQ_SYS_TMPLS", None)
    if hvlist:
        hvlist = hvlist.split(",")
    else:
        hvlist = []
    if any(map(lambda hv: hv in hvlist,
               ["kvm", "xenserver", "vmware", "lxc"])):
        secondaryStorage = xenrt.ExternalNFSShare()
        storagePath = secondaryStorage.getMount()
        url = 'nfs://%s' % (secondaryStorage.getMount().replace(':', ''))
        marvinApi.copySystemTemplatesToSecondaryStorage(storagePath, "NFS")
        self.primaryManagementServer.special['initialNFSSecStorageUrl'] = url
    elif "hyperv" in hvlist:
        if xenrt.TEC().lookup("EXTERNAL_SMB", False, boolean=True):
            secondaryStorage = xenrt.ExternalSMBShare()
            storagePath = secondaryStorage.getMount()
            url = 'cifs://%s' % (secondaryStorage.getMount().replace(':', ''))
            marvinApi.copySystemTemplatesToSecondaryStorage(storagePath, "SMB")
            self.primaryManagementServer.special['initialSMBSecStorageUrl'] = url
    if xenrt.TEC().lookup("CCP_CODE_COVERAGE", False, boolean=True):
        # Instrument every management server for coverage collection.
        xenrt.TEC().logverbose("Enabling code coverage collection...")
        for m in self.allManagementServers:
            if m.execcmd("ls %s/setup_codecoverage.sh" % self.installDir,
                         retval="code") != 0:
                raise xenrt.XRTError("CCP_CODE_COVERAGE set but setup_codecoverage.sh not found in build")
            m.execcmd("cd %s && ./setup_codecoverage.sh" % self.installDir)
        self.restart()
        xenrt.TEC().logverbose("...done")
    # Best-effort identification of the build commit.
    commit = None
    try:
        commit = self.primaryManagementServer.execcmd("cloudstack-sccs").strip()
        xenrt.TEC().logverbose("Management server was built from commit %s" % commit)
    except:
        xenrt.TEC().warning("Error when trying to identify management server version")
    if commit:
        expectedCommit = xenrt.getCCPCommit(self.primaryManagementServer.distro)
        if expectedCommit and commit != expectedCommit:
            raise xenrt.XRTError("Management server commit %s does not match expected commit %s" % (commit, expectedCommit))
def setLacpTimeout(self, port, value):
    """Set the LACP timeout on a switch port (unsupported here).

    :raises xenrt.XRTError: always, for this switch type.
    """
    raise xenrt.XRTError("LACP timeout not supported on this switch")
class TCMigrate(xenrt.TestCase):
    """Iteratively migrate a guest and verify it remains healthy.

    Supports localhost or cross-host migration, live or non-live,
    back-to-back ("fast") migrations, optional in-guest workloads,
    and clock-skew checks after each migration.
    """

    # In-guest workloads for Linux guests.
    WORKLOADS = ["w_find",
                 "w_memtest",
                 #"w_spamcons",
                 "w_forktest2"]

    # In-guest workloads for Windows guests.
    WINDOWS_WORKLOADS = ["Prime95",
                         "Ping",
                         "SQLIOSim",
                         "Burnintest",
                         "NetperfTX",
                         "NetperfRX",
                         "Memtest"]

    def __init__(self):
        xenrt.TestCase.__init__(self, "TCMigrate")
        self.workloads = None       # handles returned by startWorkloads()
        self.guest = None           # guest under test (possibly a clone)
        self.semclass = "TCMigrate"
        self.usedclone = False      # True if the test ran against a clone

    def run(self, arglist=None):
        """Parse arguments and run the migration loop.

        Recognised args: guest=<name>, loops=<n>, live, reboot,
        iterreboot, to=<host|localhost>, fast, workloads[=csv],
        config=<xml>, clone.
        """
        loops = 50
        live = "false"
        reboot = False
        target = None
        fast = False
        workloads = None
        gname = None
        clonevm = False
        iterreboot = False

        # Mandatory args
        for arg in arglist:
            l = string.split(arg, "=", 1)
            if l[0] == "guest":
                gname = l[1]
            if l[0] == "loops":
                loops = int(l[1])
            elif l[0] == "live":
                live = "true"
            elif l[0] == "reboot":
                reboot = True
            elif l[0] == "iterreboot":
                iterreboot = True
            elif l[0] == "to":
                if l[1] != "localhost":
                    target = l[1]
            elif l[0] == "fast":
                fast = True
            elif l[0] == "workloads":
                if len(l) > 1:
                    workloads = l[1].split(",")
                else:
                    workloads = self.WINDOWS_WORKLOADS
            elif l[0] == "config":
                # Resolve a guest via an XML config description.
                matching = xenrt.TEC().registry.guestLookup(\
                            **xenrt.util.parseXMLConfigString(l[1]))
                for n in matching:
                    xenrt.TEC().comment("Found matching guest(s): %s" %
                                        (matching))
                if matching:
                    gname = matching[0]
            elif l[0] == "clone":
                clonevm = True
        if not gname:
            raise xenrt.XRTError("No guest name specified")
        g = self.getGuest(gname)
        self.guest = g
        # Some distros are configured to skip migration testing entirely.
        if g.distro and g.distro in string.split(\
                xenrt.TEC().lookup("SKIP_MIGRATE_DISTROS", ""), ","):
            xenrt.TEC().skip("Skipping migrate on %s" % (g.distro))
            return
        self.getLogsFrom(g.host)

        if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) \
                or clonevm:
            # Run against a clone so failures do not block other tests.
            xenrt.TEC().comment("Using clone to run test.")
            self.blocker = False
            if g.getState() != "UP":
                g.start()
            g.preCloneTailor()
            g.shutdown()
            clone = g.cloneVM()
            self.guest = clone
            g = clone
            self.usedclone = True
            self.getLogsFrom(g)

        if target:
            thost = xenrt.TEC().registry.hostGet(target)
            if not thost:
                raise xenrt.XRTError("Cannot find host %s in registry" %
                                     (target))
            self.getLogsFrom(thost)
            # Alternate between the target host and the guest's own host.
            hostlist = [thost, g.host]
            xenrt.TEC().comment("Migrating to %s" % (thost.getName()))
        else:
            hostlist = [g.host]
            xenrt.TEC().comment("Performing localhost migrate")

        if live == "true":
            xenrt.TEC().progress("Running %d iterations of live migrate "
                                 "using %s." % (loops, gname))
        else:
            xenrt.TEC().progress("Running %d iterations of migrate using %s."
                                 % (loops, gname))
        if fast:
            xenrt.TEC().comment("Using back to back migrations")

        try:
            if g.getState() == "DOWN":
                xenrt.TEC().comment("Starting guest %s before commencing "
                                    "migrate." % (g.name))
                g.start()
            # Make sure the guest is healthy before we start
            if not g.windows:
                g.waitForSSH(60, desc="Guest check")
            else:
                g.waitForDaemon(60, desc="Guest check")
            # Make sure there is sufficient memory on the first target
            freemem = hostlist[0].getFreeMemory()
            if freemem < g.memory:
                if xenrt.TEC().lookup("MIGRATE_NOMEM_SKIP", False,
                                      boolean=True):
                    xenrt.TEC().skip("Skipping because of insufficent free "
                                     "memory on %s (%u < %u)" %
                                     (hostlist[0].getName(), freemem,
                                      g.memory))
                    return
                else:
                    raise xenrt.XRTError("Insufficent free "
                                         "memory on %s (%u < %u)" %
                                         (hostlist[0].getName(), freemem,
                                          g.memory))
            # Start workloads on the guest
            if workloads:
                if g.windows:
                    self.workloads = g.startWorkloads(workloads)
                else:
                    self.workloads = g.startWorkloads(self.WORKLOADS)
        except Exception, e:
            traceback.print_exc(file=sys.stderr)
            raise xenrt.XRTError("Guest broken before we started (%s)" %
                                 (str(e)))

        success = 0
        mt = xenrt.util.Timer()
        try:
            for i in range(loops):
                if xenrt.GEC().abort:
                    xenrt.TEC().warning("Aborting on command")
                    break
                # Round-robin over the candidate hosts.
                h = hostlist[i % len(hostlist)]
                xenrt.TEC().logverbose("Starting loop iteration %u (to %s)..."
                                       % (i, h.getName()))
                if not fast:
                    domid = g.getDomid()
                    skew1 = g.getClockSkew()
                g.migrateVM(h, live=live, fast=fast, timer=mt)
                if not fast:
                    skew2 = g.getClockSkew()
                    time.sleep(10)
                    g.check()
                    if not target:
                        # On localhost make sure we did something
                        if g.getDomid() == domid:
                            raise xenrt.XRTError("Domain ID unchanged after "
                                                 "migrate.")
                    if skew1 != None and skew2 != None:
                        delta = abs(skew2 - skew1)
                        note = "Before the migrate the skew from controller " \
                               "time was %fs and afterwards it was %fs" % \
                               (skew1, skew2)
                        xenrt.TEC().logverbose(note)
                        if delta > 2000000.0:
                            raise xenrt.XRTFailure("Clock skew detected after "
                                                   "migrate", note)
                        else:
                            # Check skew now, in these general tests we'll
                            # allow a slight delay for the clock to fix
                            # itself up
                            time.sleep(5)
                            skew3 = g.getClockSkew()
                            delta = abs(skew3 - skew1)
                            if delta > 3.0:
                                note = "Before the suspend the skew from " \
                                       "controller time was %fs and " \
                                       "afterwards it was %fs, a short " \
                                       "while later it was %fs" % \
                                       (skew1, skew2, skew3)
                                xenrt.TEC().warning("Clock skew detected "
                                                    "after suspend/resume: " +
                                                    note)
                success = success + 1
                if iterreboot:
                    g.reboot()
                    # Restart workloads after the in-iteration reboot.
                    if workloads:
                        if g.windows:
                            self.workloads = g.startWorkloads(workloads)
                        else:
                            self.workloads = g.startWorkloads(self.WORKLOADS)
        finally:
            xenrt.TEC().comment("%u/%u iterations successful." %
                                (success, loops))
            if mt.count() > 0:
                xenrt.TEC().logverbose("Migrate times: %s" %
                                       (mt.measurements))
                xenrt.TEC().value("MIGRATE_MAX", mt.max())
                xenrt.TEC().value("MIGRATE_MIN", mt.min())
                xenrt.TEC().value("MIGRATE_AVG", mt.mean())
                xenrt.TEC().value("MIGRATE_DEV", mt.stddev())

        if fast:
            # Fast mode skipped the per-iteration check; verify once now.
            time.sleep(10)
            g.check()
        if workloads:
            g.stopWorkloads(self.workloads)
        try:
            if reboot:
                g.reboot()
        except xenrt.XRTFailure, e:
            # A failed post-test reboot is an error, not a test failure.
            raise xenrt.XRTError(e.reason)
class TCHibernate(xenrt.TestCase):
    """Tests hibernate initiated from within the guest."""

    # In-guest workloads for Linux guests.
    WORKLOADS = ["w_find",
                 "w_forktest2",
                 #"w_spamcons",
                 "w_memtest"]

    # In-guest workloads for Windows guests.
    WINDOWS_WORKLOADS = ["Prime95",
                         "Ping",
                         "SQLIOSim",
                         "Burnintest",
                         "NetperfTX",
                         "NetperfRX",
                         "Memtest"]

    def __init__(self):
        xenrt.TestCase.__init__(self, "TCHibernate")
        self.blocker = True         # failures block dependent tests
        self.guest = None           # guest under test (possibly a clone)
        self.workloads = None       # handles returned by startWorkloads()
        self.usedclone = False      # True if the test ran against a clone

    def run(self, arglist=None):
        """Parse arguments, then hibernate/resume the guest in a loop.

        Recognised args: guest=<name>, loops=<n>, reboot,
        workloads[=csv], config=<xml>, clone.
        """
        loops = 50
        reboot = False
        workloads = None
        gname = None
        clonevm = False

        for arg in arglist:
            l = string.split(arg, "=", 1)
            if l[0] == "guest":
                gname = l[1]
            if l[0] == "loops":
                loops = int(l[1])
            if l[0] == "reboot":
                reboot = True
            elif l[0] == "workloads":
                if len(l) > 1:
                    workloads = l[1].split(",")
                else:
                    workloads = self.WINDOWS_WORKLOADS
            elif l[0] == "config":
                # Resolve a guest via an XML config description.
                matching = xenrt.TEC().registry.guestLookup(\
                            **xenrt.util.parseXMLConfigString(l[1]))
                for n in matching:
                    xenrt.TEC().comment("Found matching guest(s): %s" %
                                        (matching))
                if matching:
                    gname = matching[0]
            elif l[0] == "clone":
                clonevm = True
        if not gname:
            raise xenrt.XRTError("No guest name specified.")
        guest = self.getGuest(gname)
        self.guest = guest
        host = guest.host
        self.getLogsFrom(host)

        if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) \
                or clonevm:
            # Run against a clone so failures do not block other tests.
            xenrt.TEC().comment("Using clone to run test.")
            self.blocker = False
            if guest.getState() != "UP":
                guest.start()
            guest.preCloneTailor()
            guest.shutdown()
            clone = guest.cloneVM()
            self.guest = clone
            guest = clone
            self.usedclone = True
            self.getLogsFrom(guest)

        # Hibernate is only exercised for Windows guests under 4GB that
        # are not on the expected-failure list.
        if guest.memory >= 4096:
            xenrt.TEC().skip("Skipping hibernate on > 4GB guest.")
            return
        if not guest.windows:
            xenrt.TEC().skip("Skipping hibernate on non-Windows guest.")
            return
        expfail = string.split(host.lookup("EXPFAIL_HIBERNATE", ""), ",")
        if guest.distro and guest.distro in expfail:
            xenrt.TEC().skip("Skipping hibernate for %s which is expected "
                             "to fail." % (guest.distro))
            return

        try:
            # Make sure the guest is up
            if guest.getState() == "DOWN":
                xenrt.TEC().comment("Starting guest before commencing loop.")
                guest.start()
            # Make sure the guest is healthy before we start.
            guest.waitForDaemon(60, desc="Guest check")
            # Start workloads on the guest.
            if workloads:
                if guest.windows:
                    self.workloads = guest.startWorkloads(workloads)
                else:
                    self.workloads = guest.startWorkloads(self.WORKLOADS)
        except Exception, e:
            xenrt.TEC().logverbose("Guest broken before we started (%s)." %
                                   str(e))
            raise

        # Enable hibernate for Tampa Guests
        if isinstance(guest, xenrt.lib.xenserver.guest.TampaGuest):
            guest.paramSet("platform:acpi_s4", "true")
            guest.reboot()

        # Enable hibernation.  All of these are best-effort: the registry
        # key / powercfg options are not present on every Windows version.
        try:
            guest.winRegAdd("HKCU",
                            "Software\\Policies\\Microsoft\\Windows\\"
                            "System\\Power",
                            "PromptPasswordOnResume",
                            "DWORD",
                            0)
            try:
                guest.xmlrpcExec("powercfg.exe /GLOBALPOWERFLAG off /OPTION RESUMEPASSWORD")
            except:
                pass
        except:
            pass
        try:
            guest.xmlrpcExec("powercfg.exe /HIBERNATE ON")
        except:
            pass

        # Test hibernate in a loop
        success = 0
        try:
            for i in range(loops):
                xenrt.TEC().logverbose("Starting loop iteration %u..." % (i))
                host.listDomains()
                attempt = 0
                while True:
                    try:
                        # Ignore errors since we may get the connection
                        # severed on the down
                        guest.xmlrpcStart("shutdown /h")
                    except:
                        pass
                    try:
                        guest.poll("DOWN", timeout=1200)
                        break
                    except Exception, e:
                        try:
                            # See if the hibernate started, i.e. we can't ping
                            # the execdaemon.
                            guest.checkReachable()
                        except:
                            # Daemon unreachable but domain never went DOWN:
                            # the hibernate stalled part-way.
                            guest.checkHealth(unreachable=True)
                            raise xenrt.XRTFailure("Hibernate didn't complete")
                        # Guest still reachable: hibernate never started;
                        # retry up to three times in total.
                        guest.check()
                        if attempt == 2:
                            self.blocker = False
                            raise xenrt.XRTFailure("Hibernate didn't happen after 3 attempts")
                        else:
                            xenrt.TEC().warning("Hibernate didn't seem to happen.")
                        attempt = attempt + 1
                        continue
                time.sleep(2)
                host.listDomains()
                guest.start(skipsniff=True)
                success = success + 1
        finally:
            self.tec.comment("%u/%u iterations successful" % (success, loops))

        # Stop guest workloads.
        if workloads:
            guest.stopWorkloads(self.workloads)
        try:
            if reboot:
                guest.reboot()
        except xenrt.XRTFailure, e:
            # A failed post-test reboot is an error, not a test failure.
            raise xenrt.XRTError(e.reason)
def instanceCanMigrateTo(self, instance):
    """Abstract hook for querying migration targets for *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def instanceResidentOn(self, instance):
    """Abstract hook for querying where *instance* is resident.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def destroyInstance(self, instance):
    """Abstract hook for destroying *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def getAllExistingInstances(self):
    """Abstract hook for enumerating existing instances.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def recover(self, host):
    """Abstract hook for recovering *host* after an outage.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Unimplemented")
def outage(self, host, csHost):
    """Abstract hook for injecting an outage on *host*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Unimplemented")
def _doResiliencyTest(self, arglist):
    """Abstract resiliency-test body.

    :raises xenrt.XRTError: always; derived classes must override.
    """
    raise xenrt.XRTError('This must be overridden in derived classes')
def rebootInstance(self, instance, force=False):
    """Reboot *instance* via the hypervisor it is running on.

    NOTE(review): *force* is accepted but not forwarded to the
    hypervisor call here -- confirm whether that is intentional.

    :raises xenrt.XRTError: if no hypervisor reports the instance
        as running.
    """
    hypervisor = self.getHypervisor(instance)
    if not hypervisor:
        raise xenrt.XRTError("Instance is not running")
    hypervisor.rebootInstance(instance.name)
def setInstanceIso(self, instance, isoName, isoRepo):
    """Abstract hook for attaching an ISO to *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def run(self, arglist=None):
    """Parse arguments and prepare the guest for the hibernate test.

    Recognised args: guest=<name>, loops=<n>, reboot, workloads[=csv],
    config=<xml>, clone.  Skips on >=4GB or non-Windows guests and on
    distros listed in EXPFAIL_HIBERNATE; re-raises if the guest is
    broken before the test starts.
    """
    loops = 50
    reboot = False
    workloads = None
    gname = None
    clonevm = False

    for arg in arglist:
        l = string.split(arg, "=", 1)
        if l[0] == "guest":
            gname = l[1]
        if l[0] == "loops":
            loops = int(l[1])
        if l[0] == "reboot":
            reboot = True
        elif l[0] == "workloads":
            if len(l) > 1:
                workloads = l[1].split(",")
            else:
                workloads = self.WINDOWS_WORKLOADS
        elif l[0] == "config":
            # Resolve a guest via an XML config description.
            matching = xenrt.TEC().registry.guestLookup(\
                        **xenrt.util.parseXMLConfigString(l[1]))
            for n in matching:
                xenrt.TEC().comment("Found matching guest(s): %s" %
                                    (matching))
            if matching:
                gname = matching[0]
        elif l[0] == "clone":
            clonevm = True
    if not gname:
        raise xenrt.XRTError("No guest name specified.")
    guest = self.getGuest(gname)
    self.guest = guest
    host = guest.host
    self.getLogsFrom(host)

    if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) \
            or clonevm:
        # Run against a clone so failures do not block other tests.
        xenrt.TEC().comment("Using clone to run test.")
        self.blocker = False
        if guest.getState() != "UP":
            guest.start()
        guest.preCloneTailor()
        guest.shutdown()
        clone = guest.cloneVM()
        self.guest = clone
        guest = clone
        self.usedclone = True
        self.getLogsFrom(guest)

    # Hibernate is only exercised for Windows guests under 4GB that are
    # not on the expected-failure list.
    if guest.memory >= 4096:
        xenrt.TEC().skip("Skipping hibernate on > 4GB guest.")
        return
    if not guest.windows:
        xenrt.TEC().skip("Skipping hibernate on non-Windows guest.")
        return
    expfail = string.split(host.lookup("EXPFAIL_HIBERNATE", ""), ",")
    if guest.distro and guest.distro in expfail:
        xenrt.TEC().skip("Skipping hibernate for %s which is expected "
                         "to fail." % (guest.distro))
        return

    try:
        # Make sure the guest is up
        if guest.getState() == "DOWN":
            xenrt.TEC().comment("Starting guest before commencing loop.")
            guest.start()
        # Make sure the guest is healthy before we start.
        guest.waitForDaemon(60, desc="Guest check")
        # Start workloads on the guest.
        if workloads:
            if guest.windows:
                self.workloads = guest.startWorkloads(workloads)
            else:
                self.workloads = guest.startWorkloads(self.WORKLOADS)
    except Exception, e:
        xenrt.TEC().logverbose("Guest broken before we started (%s)." %
                               str(e))
        raise
def ejectInstanceIso(self, instance):
    """Abstract hook for ejecting the ISO from *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def run(self, arglist=None):
    """Parse arguments and prepare the guest for the migration test.

    Recognised args: guest=<name>, loops=<n>, live, reboot, iterreboot,
    to=<host|localhost>, fast, workloads[=csv], config=<xml>, clone.
    Raises XRTError if the guest is unhealthy before the test starts.
    """
    loops = 50
    live = "false"
    reboot = False
    target = None
    fast = False
    workloads = None
    gname = None
    clonevm = False
    iterreboot = False

    # Mandatory args
    for arg in arglist:
        l = string.split(arg, "=", 1)
        if l[0] == "guest":
            gname = l[1]
        if l[0] == "loops":
            loops = int(l[1])
        elif l[0] == "live":
            live = "true"
        elif l[0] == "reboot":
            reboot = True
        elif l[0] == "iterreboot":
            iterreboot = True
        elif l[0] == "to":
            if l[1] != "localhost":
                target = l[1]
        elif l[0] == "fast":
            fast = True
        elif l[0] == "workloads":
            if len(l) > 1:
                workloads = l[1].split(",")
            else:
                workloads = self.WINDOWS_WORKLOADS
        elif l[0] == "config":
            # Resolve a guest via an XML config description.
            matching = xenrt.TEC().registry.guestLookup(\
                        **xenrt.util.parseXMLConfigString(l[1]))
            for n in matching:
                xenrt.TEC().comment("Found matching guest(s): %s" %
                                    (matching))
            if matching:
                gname = matching[0]
        elif l[0] == "clone":
            clonevm = True
    if not gname:
        raise xenrt.XRTError("No guest name specified")
    g = self.getGuest(gname)
    self.guest = g
    # Some distros are configured to skip migration testing entirely.
    if g.distro and g.distro in string.split(\
            xenrt.TEC().lookup("SKIP_MIGRATE_DISTROS", ""), ","):
        xenrt.TEC().skip("Skipping migrate on %s" % (g.distro))
        return
    self.getLogsFrom(g.host)

    if xenrt.TEC().lookup("OPTION_USE_CLONE", False, boolean=True) \
            or clonevm:
        # Run against a clone so failures do not block other tests.
        xenrt.TEC().comment("Using clone to run test.")
        self.blocker = False
        if g.getState() != "UP":
            g.start()
        g.preCloneTailor()
        g.shutdown()
        clone = g.cloneVM()
        self.guest = clone
        g = clone
        self.usedclone = True
        self.getLogsFrom(g)

    if target:
        thost = xenrt.TEC().registry.hostGet(target)
        if not thost:
            raise xenrt.XRTError("Cannot find host %s in registry" %
                                 (target))
        self.getLogsFrom(thost)
        # Alternate between the target host and the guest's own host.
        hostlist = [thost, g.host]
        xenrt.TEC().comment("Migrating to %s" % (thost.getName()))
    else:
        hostlist = [g.host]
        xenrt.TEC().comment("Performing localhost migrate")

    if live == "true":
        xenrt.TEC().progress("Running %d iterations of live migrate "
                             "using %s." % (loops, gname))
    else:
        xenrt.TEC().progress("Running %d iterations of migrate using %s."
                             % (loops, gname))
    if fast:
        xenrt.TEC().comment("Using back to back migrations")

    try:
        if g.getState() == "DOWN":
            xenrt.TEC().comment("Starting guest %s before commencing "
                                "migrate." % (g.name))
            g.start()
        # Make sure the guest is healthy before we start
        if not g.windows:
            g.waitForSSH(60, desc="Guest check")
        else:
            g.waitForDaemon(60, desc="Guest check")
        # Make sure there is sufficient memory on the first target
        freemem = hostlist[0].getFreeMemory()
        if freemem < g.memory:
            if xenrt.TEC().lookup("MIGRATE_NOMEM_SKIP", False,
                                  boolean=True):
                xenrt.TEC().skip("Skipping because of insufficent free "
                                 "memory on %s (%u < %u)" %
                                 (hostlist[0].getName(), freemem,
                                  g.memory))
                return
            else:
                raise xenrt.XRTError("Insufficent free "
                                     "memory on %s (%u < %u)" %
                                     (hostlist[0].getName(), freemem,
                                      g.memory))
        # Start workloads on the guest
        if workloads:
            if g.windows:
                self.workloads = g.startWorkloads(workloads)
            else:
                self.workloads = g.startWorkloads(self.WORKLOADS)
    except Exception, e:
        traceback.print_exc(file=sys.stderr)
        raise xenrt.XRTError("Guest broken before we started (%s)" %
                             (str(e)))
def createInstanceSnapshot(self, instance, name, memory=False, quiesce=False):
    """Abstract hook for snapshotting *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def prepare(self, arglist=None):
    """Fetch the default pool and require at least two member hosts.

    :raises xenrt.XRTError: when the pool has fewer than two hosts.
    """
    self.pool = self.getDefaultPool()
    hostcount = len(self.pool.getHosts())
    if hostcount < 2:
        raise xenrt.XRTError("Need a pool of 2 hosts",
                             data="Found %u" % (hostcount))
def deleteInstanceSnapshot(self, instance, name):
    """Abstract hook for deleting a snapshot of *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def sendEnable(self):
    """Abstract hook: enter enable mode on the device.

    :raises xenrt.XRTError: always, in this base class.
    """
    raise xenrt.XRTError(
        'Function sendEnable not implemented for base class')
def revertInstanceToSnapshot(self, instance, name):
    """Abstract hook for reverting *instance* to a named snapshot.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def makeFS(self, disk):
    """Abstract hook for creating a filesystem on *disk*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Unimplemented")
def instanceScreenshot(self, instance, path):
    """Abstract hook for capturing a screenshot of *instance*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def paramSet(self, paramName, paramValue):
    """Set an advanced configuration parameter on this VM via PowerCLI.

    :raises xenrt.XRTError: when the guest's host is not registered
        with a vCenter datacenter.
    """
    registered = self.host and self.host.datacenter
    if not registered:
        raise xenrt.XRTError("guest.host not added to VCenter")
    cli_cmd = r"""New-AdvancedSetting -Entity (Get-VM -Name %s) -Name "%s" -Value "%s" -Confirm:$false -Force:$true """ % (
        self.name, paramName, paramValue)
    vcenter = xenrt.lib.esx.getVCenter()
    vcenter.execPowerCLI(cli_cmd)
def existingInstance(self, name):
    """Abstract hook for looking up an existing instance by *name*.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")
def run(self, arglist=None):
    """Port-scan a host with nmap and fail on unexpected open ports.

    arglist[0] optionally names the registry host to scan (default
    RESOURCE_HOST_0).  Expected ports come from NMAP_ONLY_PORTS or the
    host's NMAP_ALLOWED_PORTS; entries wrapped in parentheses are
    optional, and services in NMAP_ALLOWED_SERVICES are tolerated on
    any port.

    :raises xenrt.XRTError: host not found or nmap produced no output.
    :raises xenrt.XRTFailure: the scan found a policy violation.
    """
    machine = "RESOURCE_HOST_0"
    if arglist and len(arglist) > 0:
        machine = arglist[0]
    host = xenrt.TEC().registry.hostGet(machine)
    if not host:
        raise xenrt.XRTError("Unable to find host %s in registry" %
                             (machine))
    self.getLogsFrom(host)
    # Select allowed ports by product
    self.expected = string.split(xenrt.TEC().lookup("NMAP_ONLY_PORTS", ""))
    if len(self.expected) == 0:
        self.expected.extend(
            string.split(host.lookup("NMAP_ALLOWED_PORTS",
                                     "tcp/22 tcp/6936")))
        self.allowedservices.extend(
            string.split(host.lookup("NMAP_ALLOWED_SERVICES", "nlockmgr")))
    # Run nmap to scan open ports
    outfile = "%s/nmap.txt" % (self.tec.getLogdir())
    xmlfile = "%s/nmap.xml" % (self.tec.getLogdir())
    xenrt.nmap(host.getIP(), xmlfile, outfile)
    if not os.path.exists(xmlfile):
        raise xenrt.XRTError("nmap output file not found")
    # Parse nmap's XML output into (proto/port, service, state) tuples.
    ports = []
    portlist = []
    dom = xml.dom.minidom.parse(xmlfile)
    for i in dom.childNodes:
        if i.nodeType == i.ELEMENT_NODE and i.localName == "nmaprun":
            for c in i.childNodes:
                if c.nodeType == c.ELEMENT_NODE and c.localName == "host":
                    for x in c.childNodes:
                        if x.nodeType == x.ELEMENT_NODE and \
                               x.localName == "ports":
                            for p in x.childNodes:
                                if p.nodeType == p.ELEMENT_NODE and \
                                       p.localName == "port":
                                    proto = p.getAttribute("protocol")
                                    port = p.getAttribute("portid")
                                    service = "UNKNOWN"
                                    state = "UNKNOWN"
                                    for z in p.childNodes:
                                        if z.nodeType == z.ELEMENT_NODE \
                                               and z.localName == "service":
                                            service = z.getAttribute("name")
                                        elif z.nodeType == z.ELEMENT_NODE \
                                                and z.localName == "state":
                                            state = z.getAttribute("state")
                                    ports.append(("%s/%s" % (proto, port),
                                                  service, state))
                                    portlist.append("%s/%s" % (proto, port))
    # repr() replaces the Python-2-only backtick syntax used previously.
    self.tec.logverbose("Parsed ports: %s" % (repr(ports)))
    # Check expected open ports are open
    passed = True
    for i in self.expected:
        if re.search(r"^\(.+\)$", i):
            # Non-compulsory port
            pass
        elif not i in portlist:
            self.tec.reason("Port %s is not open" % (i))
            passed = False
        else:
            self.tec.comment("Expected open port %s found to be open" % (i))
    # Check for any unexpected open ports
    for i in ports:
        port, service, state = i
        if state == "open" or state == "UNKNOWN":
            if (not port in self.expected) and \
                   (not "(%s)" % (port) in self.expected):
                if not service in self.allowedservices:
                    self.tec.reason("Unexpected port %s (%s) is open" %
                                    (port, service))
                    passed = False
                else:
                    self.tec.comment("Allowed service %s found on port %s" %
                                     (service, port))
    if not passed:
        raise xenrt.XRTFailure()
def runAsWorkload(self, params=None):
    """Abstract hook for running this test as a background workload.

    :raises xenrt.XRTError: always; subclasses must implement this.
    """
    raise xenrt.XRTError("Not implemented")