def DeleteVmExceptDisksTests(host, newName):
    """Exercise DeleteVmExceptDisks against both a registered and an
    unregistered dummy VM."""
    # If we delete a registered VM using llpm, VC does not do cleanup,
    # so rename the test VM to keep other tests runnable afterwards.
    if options.vcHost:
        newName = newName + "delTest"
    hostRef = GetHostSystem(host)
    llpm = GetLLPM(hostRef)
    Log("Delete Registered VM")
    dummyVm = vm.CreateQuickDummy(newName, 1, host=hostRef,
                                  datastoreName=options.datastore)
    cfgPath = dummyVm.GetConfig().GetFiles().GetVmPathName()
    DeleteVmExceptDisks(host, llpm, cfgPath)
    Log("Delete Unregistered VM")
    dummyVm = vm.CreateQuickDummy(newName + "1", 1, host=hostRef,
                                  datastoreName=options.datastore)
    cfgPath = dummyVm.GetConfig().GetFiles().GetVmPathName()
    dummyVm.Unregister()
    DeleteVmExceptDisks(host, llpm, cfgPath)
def main():
    """Command-line driver: create dummy VMs on a hostd-managed system,
    power-cycle each one the requested number of times, then clean up."""
    options = GetOptions()
    Connect(host=options.host,
            user=options.user,
            pwd=options.password)
    # The environment browser supplies the config option/target objects
    # consumed by the dummy-VM creation helper.
    envBrowser = GetEnv()
    cfgOption = envBrowser.QueryConfigOption(None, None)
    cfgTarget = envBrowser.QueryConfigTarget(None)
    for idx in range(int(options.num_iterations)):
        dummyVm = vm.CreateQuickDummy(options.vm_name + "_" + str(idx),
                                      options.num_scsi_disks,
                                      options.num_ide_disks,
                                      datastoreName=options.datastore_name,
                                      cfgOption=cfgOption,
                                      cfgTarget=cfgTarget)
        # Timed power cycles.
        for _ in range(int(options.num_power_cycles)):
            timer = StopWatch()
            vm.PowerOn(dummyVm)
            timer.finish("PowerOn done")
            timer = StopWatch()
            vm.PowerOff(dummyVm)
            timer.finish("PowerOff done")
        # Delete the vm as cleanup
        if not options.dont_delete:
            WaitForTask(dummyVm.Destroy())
def setUp(self):
    """ Connects to the host and creates test VM. """
    self._host = vimhost.Host()
    # Install this host's service instance as the active one for the
    # module-level pyVim helpers used below.
    connect.SetSi(self._host._si)
    # Create dummy test VM named after the test module.
    self._vmname = "ttt.%s" % str(self.__class__.__module__)
    # NOTE(review): this check can never fire — _vmname was just assigned
    # a formatted string and cannot be None; kept for byte-identity.
    if (self._vmname is None):
        raise self.failureException(
            "Test VM name is not a valid path name. %s" % (self._vmname))
    try:
        # Remove any leftover VM from a previous run of the same test.
        self.destroyTestVM(self._vmname)
        envBrowser = invt.GetEnv()
        cfgTarget = envBrowser.QueryConfigTarget(None)
        # If the host has no datastore at all, create a local one so the
        # dummy VM has somewhere to live.
        if len(cfgTarget.GetDatastore()) == 0:
            cm = host.GetHostConfigManager(self._si)
            dsm = cm.GetDatastoreSystem()
            # todo is this going to work on vmkernel
            dsm.CreateLocalDatastore("tttds1", "/var/tmp")
        # create a quick dummy test VM with one SCSI disk
        self._vm = vm.CreateQuickDummy(self._vmname, 1)
        vm1 = vm.VM(self._vm, None, None)
        # A freshly created VM must start out powered off.
        if (not vm1.IsPoweredOff()):
            raise self.failureException(
                "Newly created test VM should be powered off.")
    except Exception as msg:
        # Wrap any setup failure in the unittest failure exception so the
        # test is reported as failed rather than errored.
        raise self.failureException(
            "Failed to create test VM \"%s\" on host=\"%s\": %s" %
            (self._vmname, self._host, msg))
    print("INFO: created vm %s " % (self._vmname))
def main():
    """Entry point for the SATA reconfigure tests.

    Connects to the host given on the command line, creates a hardware
    version 10 dummy VM, runs the SATA controller/disk/cdrom reconfigure
    tests against it, and (unless --nodelete) destroys the VM afterwards.
    """
    supportedArgs = [
        # (option aliases, default, help text, key)
        # Fix: the "user" tuple was corrupted ("user="******"root") —
        # restored to the same shape as the other argument tuples.
        (["h:", "host="], "localhost", "Host name", "host"),
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "vmname="], "SATATest", "Name of the virtual machine", "vmname")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)
    # Connect; ensure we disconnect however the test exits.
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)
    # Process command line
    vmname = args.GetKeyValue("vmname")
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"
    vm1 = None
    try:
        Log("Cleaning up VMs from previous runs...")
        vm.Delete(vmname, True)
        ## Positive tests on a hwVersion 10 VM
        Log("Creating Hw10 VM..")
        vm1 = vm.CreateQuickDummy(vmname, vmxVersion="vmx-10",
                                  memory=4, guest="otherGuest")
        # Test add of SATA controller
        TestSataCtlrReconfig(vm1)
        # Mess with SATA disks
        TestEditSataDisk(vm1)
        # Mess with SATA cdroms
        TestEditSataCdrom(vm1)
        Log("Tests completed.")
    except Exception as e:
        status = "FAIL"
        Log("Caught exception : " + str(e))
    finally:
        # Delete the vm as cleanup
        if not noDelete and vm1 is not None:
            task = vm1.Destroy()
            WaitForTask(task)
            vm1 = None
    Log("TEST RUN COMPLETE: " + status)
def createTestVM(self, name):
    """Create a 3-disk dummy VM named *name* in the root (host/user)
    resource pool and return its inventory object."""
    vm.CreateQuickDummy(name, 3, diskSizeInMB=4096)
    inventoryPath = "ha-datacenter/vm/" + name
    return self.searchIndex.FindByInventoryPath(inventoryPath)
def RunCreateTest(name):
    """Create a dummy VM, power-cycle it, re-register it from its config
    file, then destroy it."""
    dummy = vm.CreateQuickDummy(name)
    vm.PowerOn(dummy)
    vm.PowerOff(dummy)
    # Remember the .vmx path before unregistering so we can register again.
    vmxPath = dummy.GetConfig().GetFiles().GetVmPathName()
    dummy.Unregister()
    folder.Register(vmxPath)
    vm.Destroy(dummy)
def TestVmMigrate(self):
    """Bulk-create a vvol datastore on the test hosts, create a dummy VM
    on it, vMotion the VM from the first host to the second, then power
    off, destroy the VM and bulk-remove the datastore."""
    self.banner(self.TestVmMigrate)
    if len(self._hosts) <= 1:
        VerboseLog(logInfo, "not enough hosts..skipping")
        # Fix: without this return the code below indexed self._hosts[1]
        # and raised IndexError instead of actually skipping.
        return
    vmname = "test_migrate_vvol_vm"
    self.CleanupVm(vmname)
    host1 = self._hosts[0]
    host2 = self._hosts[1]
    scId = self._sc
    spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
    spec.SetScId(scId)
    # Randomized suffix avoids collisions with leftover datastores.
    spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
    ret = True
    try:
        VerboseLog(logTrivia, "{ Creating bulk: ")
        create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
        task.WaitForTask(create_task)
        VerboseLog(logVerbose, create_task.info.result)
        for result in create_task.info.result:
            if result.result == 'fail':
                VerboseLog(logInfo, "create failed for host " + result.hostKey)
                raise Exception("unexpected failure")
        ds = create_task.info.result[0].ds
        testvm = vm.CreateQuickDummy(vmname, host=host1, datastoreName=ds.name,
                                     dc=self._dc.name, numScsiDisks=1,
                                     memory=12)
        vm.PowerOn(testvm)
        migrate_task = testvm.Migrate(host2.parent.resourcePool, host2,
                                      Vim.VirtualMachine.MovePriority.highPriority,
                                      None)
        task.WaitForTask(migrate_task)
        vm.PowerOff(testvm)
        vm.Destroy(testvm)
        VerboseLog(logTrivia, "{ Removing bulk: ")
        delete_task = self._vasaMgr.RemoveVVolDatastore(ds, self._hosts)
        task.WaitForTask(delete_task)
        VerboseLog(logVerbose, delete_task.info.result)
        for result in delete_task.info.result:
            if result.result == 'fail':
                VerboseLog(logInfo, "remove failed for host " + result.hostKey)
                raise Exception("unexpected failure in bulk remove")
    except Exception:
        # Best-effort test: record the traceback and report failure below.
        VerboseLog(logTrivia, traceback.format_exc())
        ret = False
    VerboseLog(logInfo, "passed" if ret else "failed")
def testKeyOnlyRemove(si, vmxVersion, ds):
    """This test verifies that it is possible to remove devices by passing a
    VirtualDevice object with only key specified. This is a legacy behavior
    present in Foundry, supported only for compatibility reasons. It is not
    recommended to use this functionality in any products."""
    # Fix: string.letters / xrange are Python-2-only; use the portable
    # string.ascii_letters / range (the rest of this block already uses
    # Python-3 syntax such as print()).
    suffix = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(8))
    vm1Name = '-'.join(['KeyOnlyRemove', suffix])
    print('Creating %s VM on %s' % (vm1Name, ds))
    # Remove any leftover VM with the same name from a previous run.
    task.WaitForTasks(
        [vm1.Destroy() for vm1 in folder.GetVmAll() if vm1.name == vm1Name])
    vm1 = vm.CreateQuickDummy(vm1Name,
                              numScsiDisks=1,
                              numIdeDisks=1,
                              diskSizeInMB=1,
                              nic=1,
                              cdrom=1,
                              datastoreName=ds,
                              vmxVersion=vmxVersion)
    print('Testing device removal via VirtualDevice with key set on VM %s' %
          vm1Name)
    # gather all the devices we want to remove
    devices = [
        vim.vm.device.VirtualDevice(key=d.key)
        for d in vm1.config.hardware.device
        if isinstance(d, (vim.vm.device.VirtualEthernetCard,
                          vim.vm.device.VirtualDisk,
                          vim.vm.device.VirtualCdrom))
    ]
    # prepare a config spec containing VirtualDevice "abstract" objects with
    # only the keys of the devices we want to remove
    cspec = vim.vm.ConfigSpec()
    for device in devices:
        vmconfig.AddDeviceToSpec(
            cspec, device, vim.vm.device.VirtualDeviceSpec.Operation.remove)
    # reconfigure the VM
    task.WaitForTask(vm1.Reconfigure(cspec))
    # verify that the devices are removed
    devices = [
        vim.vm.device.VirtualDevice(key=d.key)
        for d in vm1.config.hardware.device
        if isinstance(d, (vim.vm.device.VirtualEthernetCard,
                          vim.vm.device.VirtualDisk,
                          vim.vm.device.VirtualCdrom))
    ]
    if len(devices) != 0:
        raise Exception("Not all devices were deleted!")
    # destroy the vm
    print('Done testing, destroying %s VM' % vm1Name)
    vm.Destroy(vm1)
def SetupOtherHost(self):
    """Connect to the peer host, create the test VM there, power it on,
    and take a memoryless snapshot."""
    print(self._host + " " + self._user + " " + self._pwd)
    siOther = SmartConnect(host=self._host, user=self._user, pwd=self._pwd)
    self._vm = vm.CreateQuickDummy(self._vmName,
                                   numScsiDisks=1,
                                   datastoreName=self._ds)
    if self._vm is None:
        # Fix: raising a bare string is a TypeError in modern Python —
        # raise a real exception object instead.
        raise Exception("Failed to create VM with specified video ram size")
    Log("Other Host: Power On VM")
    vm.PowerOn(self._vm)
    Log("Other Host: Take Snapshot (no memory)")
    vm.CreateSnapshot(self._vm, "pre vdm snap", "Pre VDM snapshot", False,
                      False)
def testEditDisk(options):
    """Create a dummy VM, add a non-shared flat disk, flip it to shared via
    reconfigure, then destroy the VM."""
    vmName = getUniqueVmName()
    stale = folder.Find(vmName)
    if stale:
        vm.Destroy(stale)
    testVm = vm.CreateQuickDummy(vmName, datastoreName=options.datastore,
                                 scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (vmName, options.datastore))
    addFlatDisk(options, testVm, shared=False)
    flatDisk = vmconfig.CheckDevice(testVm.config, VirtualDisk)[0]
    editDisk(options, testVm, flatDisk, shared=True)
    vm.Destroy(testVm)
def BasicRetrieveTests(host, newName):
    # Exercise the LLPM RetrieveVmRecoveryInfo / RetrieveLastVmMigrationStatus
    # calls against (1) a registered VM, (2) the same VM after unregistering,
    # and (3) a nonexistent config path (expected to fail).
    Log("Retrieve tests on dummy VM's")
    Log("Creating VM on Source Host")
    hostSystem = GetHostSystem(host)
    testVm = vm.CreateQuickDummy(newName, 1, host=hostSystem,
                                 datastoreName=options.datastore)
    llpm = GetLLPM(hostSystem)
    config = testVm.GetConfig()
    vmPathName = config.GetFiles().GetVmPathName()
    Log("Registered VM %s" % vmPathName)
    task = llpm.RetrieveVmRecoveryInfo(vmPathName)
    WaitForTask(task)
    Log(str(task.info.result))
    task = llpm.RetrieveLastVmMigrationStatus(vmPathName)
    WaitForTask(task)
    Log(str(task.info.result))
    # Same retrievals must still work after the VM is unregistered, since
    # they operate on the config path rather than the managed object.
    Log("Unregistered VM %s" % vmPathName)
    testVm.Unregister()
    task = llpm.RetrieveVmRecoveryInfo(vmPathName)
    WaitForTask(task)
    Log(str(task.info.result))
    task = llpm.RetrieveLastVmMigrationStatus(vmPathName)
    WaitForTask(task)
    Log(str(task.info.result))
    Log("Retrieve using file which is not present")
    try:
        task = llpm.RetrieveVmRecoveryInfo(vmPathName + "!@#!@!@!#")
        WaitForTask(task)
        Log(str(task.info.result))
        # NOTE(review): this raise is inside the try and is itself caught by
        # the except below, so a missing fault is only printed, not fatal.
        raise Exception("Did not throw exception with invalid file")
    except Exception as e:
        print(e)
    Log("Retrieve using file which is not present")
    try:
        task = llpm.RetrieveLastVmMigrationStatus(vmPathName + "!@#!@!@!#")
        WaitForTask(task)
        Log(str(task.info.result))
        raise Exception("Did not throw exception with invalid file")
    except Exception as e:
        print(e)
    # Clean up the unregistered VM's files via LLPM.
    task = llpm.DeleteVm(config)
    WaitForTask(task)
def main():
    """Create a dummy VM and reconfigure its CPUID feature masks, then
    destroy it.  Host defaults to "jairam-esx" unless given on argv."""
    # Process command line
    host = "jairam-esx"
    if len(sys.argv) > 1:
        host = sys.argv[1]
    try:
        si = Connect(host)
        atexit.register(Disconnect, si)
        vm.CreateQuickDummy("CpuIdTest")
        v1 = folder.Find("CpuIdTest")
        print("Created a dummy")
        # Print current.
        print(v1.GetConfig().GetCpuFeatureMask())
        # Change level 0 and level 80
        config = Vim.Vm.ConfigSpec()
        # Level 0: mask out every register bit ("X" = don't care/test value).
        lvl0 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info = Vim.Host.CpuIdInfo()
        info.SetLevel(0)
        info.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEbx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEcx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl0.SetOperation("add")
        lvl0.SetInfo(info)
        # Level 1: vendor-specific (amd) mask on eax/edx only.
        lvl1 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info2 = Vim.Host.CpuIdInfo()
        info2.SetLevel(1)
        info2.SetVendor("amd")
        info2.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info2.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl1.SetOperation("add")
        lvl1.SetInfo(info2)
        config.SetCpuFeatureMask([lvl0, lvl1])
        print("Assigned features")
        task = v1.Reconfigure(config)
        if WaitForTask(task) == "error":
            raise task.GetInfo().GetError()
        vm.Destroy(v1)
    except Exception as e:
        print("Failed test due to exception: %s" % e)
        raise
def main():
    """Drive the extraConfig add/edit/invalid-key tests for the requested
    number of iterations against a hostd-managed host."""
    options, remainingOptions = ParseArgs(sys.argv[1:])
    # Connect once; disconnect automatically at interpreter exit.
    si = Connect(host=options.host,
                 user=options.user,
                 pwd=options.pwd,
                 version="vim.version.version9")
    atexit.register(Disconnect, si)
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    status = "PASS"
    for iteration in range(options.iter):
        try:
            logger.info("Starting iteration %d." % (iteration + 1))
            vm1 = None
            logger.debug("Cleaning up VMs from previous runs...")
            vm.Delete(options.vmname, True)
            logger.debug("Creating Hw7 VM..")
            vm1 = vm.CreateQuickDummy(options.vmname,
                                      vmxVersion="vmx-07",
                                      memory=4,
                                      guest="rhel5Guest")
            logger.debug("Adding an extra config setting to the VM")
            SetExtraConfig(vm1, options.key, options.val1)
            logger.debug("Editing an extra config setting on the VM")
            SetExtraConfig(vm1, options.key, options.val2)
            logger.debug("Adding a bogus extra config setting on the VM")
            SetExtraConfig(vm1, options.invalidKey, "", False)
            logger.debug("Destroying VM")
            vm.Delete(options.vmname, True)
            logger.info("End of iteration %d." % (iteration + 1))
        except Exception as e:
            logger.error("Caught exception : " + str(e))
            status = "FAIL"
    logger.info("TEST RUN COMPLETE: " + status)
def main():
    """ Simple command-line program for creating virtual machines on a
        system managed by hostd. """
    options = GetOptions()
    Connect(host=options.host,
            user=options.user,
            namespace=newestVersions.GetNamespace('vim'),
            pwd=options.password)
    # Create vms
    envBrowser = GetEnv()
    cfgOption = envBrowser.QueryConfigOption(None, None)
    cfgTarget = envBrowser.QueryConfigTarget(None)
    for i in range(int(options.num_iterations)):
        vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i),
                                  options.num_scsi_disks,
                                  options.num_ide_disks,
                                  datastoreName=options.datastore_name,
                                  cfgOption=cfgOption,
                                  cfgTarget=cfgTarget)
        # Optionally attach the VM to an opaque (e.g. NSX-managed) network.
        if options.opaquenetwork_id:
            config = Vim.Vm.ConfigSpec()
            config = vmconfig.AddOpaqueNetwork(config, cfgOption,
                                               opaqueNetworkId=options.opaquenetwork_id, \
                                               opaqueNetworkType=options.opaquenetwork_type, \
                                               externalId=options.externalID)
            vm.Reconfigure(vm1, config)
        # Timed power-on/power-off cycles.
        for _ in range(int(options.num_power_cycles)):
            clock = StopWatch()
            vm.PowerOn(vm1)
            clock.finish("PowerOn done")
            clock = StopWatch()
            vm.PowerOff(vm1)
            clock.finish("PowerOff done")
        # Delete the vm as cleanup
        if not options.dont_delete:
            task = vm1.Destroy()
            WaitForTask(task)
def ToggleCreate(toggle):
    """ Create/Delete a given VM """
    # Returns 0 on success, 1 on failure.  This script uses Python-2
    # print statements, matching the rest of its source file.
    if toggle == "on":
        print "== Creating VM"
        vmTemp = vm.CreateQuickDummy(_vmName, 1)
        # NOTE(review): assumes CreateQuickDummy returns None on failure —
        # confirm against the helper's contract.
        if vmTemp == None:
            print "Error in creating: ", _vmName
            return(1)
        return(0)
    else:
        print "== Deleting VM"
        vm1 = GetVmByName(_vmName)
        if vm1 == None:
            print "Unable to find VM", _vmName
            return(1)
        task = vm1.Destroy()
        if WaitForTask(task) == "error":
            return(1)
        return(0)
def doCommonTests():
    """Run the linked-clone scenarios shared by all test configurations."""
    print("Running common tests")
    # parameterize if needed
    vmName = "linkedCloneTest"
    numDisks = 1
    print("Creating virtual machine with name " + vmName)
    baseVm = vm.CreateQuickDummy(vmName, numDisks)
    try:
        # Add a delta disk on top of the base disk using reconfigure().
        reconfigureTest(baseVm)
        # Create a new VM that links up to the base disk of the first VM.
        baseDisk = findDisk(baseVm)
        createTest(baseDisk.backing.parent.fileName)
    finally:
        baseVm.Destroy()
def testAddDisk(options, online, shared):
    """Add flat and RDM disks to a fresh dummy VM — optionally while it is
    powered on — then destroy it."""
    vmName = getUniqueVmName()
    stale = folder.Find(vmName)
    if stale:
        vm.Destroy(stale)
    testVm = vm.CreateQuickDummy(vmName, datastoreName=options.datastore,
                                 scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (vmName, options.datastore))
    if online:
        vm.PowerOn(testVm)
        Log("PowerOn(%s)" % testVm.name)
    addFlatDisk(options, testVm, shared)
    addRdmDisk(options, testVm, shared)
    if online:
        vm.PowerOff(testVm)
        Log("PowerOff(%s)" % testVm.name)
    vm.Destroy(testVm)
def main():
    """ Simple command-line program for creating virtual machines on a
        system managed by hostd. """
    options = GetOptions()
    curSi = SmartConnect(host=options.host,
                         user=options.user,
                         pwd=options.password)
    # Create vms
    envBrowser = GetEnv()
    cfgOption = envBrowser.QueryConfigOption(None, None)
    cfgTarget = envBrowser.QueryConfigTarget(None)
    vmList = []
    tasks = []
    clock = StopWatch()
    for i in range(int(options.num_iterations)):
        vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i),
                                  options.num_scsi_disks,
                                  options.num_ide_disks,
                                  datastoreName=options.datastore_name,
                                  cfgOption=cfgOption,
                                  cfgTarget=cfgTarget)
        vmList.append(vm1)
        # Kick off opaque-network reconfigures asynchronously; the tasks
        # are collected and awaited together below.
        if options.opaquenetwork_id:
            config = Vim.Vm.ConfigSpec()
            config = vmconfig.AddOpaqueNetwork(config, cfgOption,
                                               opaqueNetworkId=options.opaquenetwork_id, \
                                               opaqueNetworkType=options.opaquenetwork_type)
            task = vm1.Reconfigure(config)
            tasks.append(task)
    WaitForTasks(tasks)
    clock.finish("Reconfigure VMs done")
    # Delete the vm as cleanup
    if not options.dont_delete:
        clock = StopWatch()
        WaitForTasks([curVm.Destroy() for curVm in vmList])
        clock.finish("Destroy VMs done")
def mainTestFirmware():
    """Toggle a hardware-version-8 VM's firmware between efi and bios and
    verify each change sticks.  Returns "PASS" or "FAIL"."""
    Log("---[ TEST " + testName + " ]---")
    vmname = "HwV8_Firmware"
    status = "PASS"
    bigClock = StopWatch()
    vm1 = None
    try:
        macosVmName = vmname + "_MacOS"
        Log("Cleaning up VMs from previous runs...")
        vm.Delete(macosVmName, True)
        Log("Creating Mac OS VM..")
        vm1 = vm.CreateQuickDummy(macosVmName,
                                  vmxVersion="vmx-08",
                                  memory=4,
                                  guest="darwin11Guest")
        # Switch to EFI and verify the config reflects it.
        firmware = "efi"
        ChangeFirmware(vm1, firmware, True)
        if firmware != vm1.config.firmware:
            raise Exception("Firmware don't match set value")
        # Switch back to BIOS and verify again.
        firmware = "bios"
        ChangeFirmware(vm1, firmware, True)
        if firmware != vm1.config.firmware:
            raise Exception("Firmware don't match set value")
        Log("Deleting VM " + macosVmName)
        vm.Delete(macosVmName, True)
        bigClock.finish(testName)
    except Exception as e:
        status = "FAIL"
        Log("Caught exception : " + str(e))
    Log("TEST [" + testName + "] COMPLETE: " + status)
    return status
def on(self): try: vmName = "vm" + self.getClientId() print "Creating vm" + vmName self.vm = vm.CreateQuickDummy(vmName, 1) if self.vm == None: print "** Error in creating vm" self.success = False print self.getClientId() + " on failed" return print "Adding VM disk" task = self.vm.Reconfigure(self.addConfigSpec) WaitForTask(task) print "Powering On vm" vm.PowerOn(self.vm) print "Creating snapshot" vm.CreateSnapshot(self.vm, "testSnapshot", "Test snap", False, False) self.success = True print self.getClientId() + " on successful" except: print self.getClientId() + " on failed" self.success = False
def TestRemoveVvolDsWithVms(self):
    """Verify that removing a vvol datastore which still hosts a VM raises
    ResourceInUse, and clean up both the VM and the datastore."""
    self.banner(self.TestRemoveVvolDsWithVms)
    VerboseLog(logTrivia, self._host)
    scId = self._sc
    vmname = "vvoldummy"
    self.CleanupExistingTestDatastores()
    self.CleanupVm(vmname)
    spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
    spec.SetScId(scId);
    # Randomized suffix avoids collisions with leftover datastores.
    spec.SetName("vvol-test-ds:%s" % random.randint(1,1000))
    testvm = None
    ds = None
    try:
        ds = self.CreateDs(spec)
        testvm = vm.CreateQuickDummy(vmname, host=self._host,
                                     datastoreName=ds.name, dc=self._dc.name,
                                     numScsiDisks=1, memory=10)
        # Expected to fail with ResourceInUse while the VM lives on it.
        self.removeDs(ds)
    except Vim.Fault.ResourceInUse:
        # Expected path: destroy the VM first, then the removal succeeds.
        if testvm != None:
            vm.Destroy(testvm)
        if ds != None:
            self.removeDs(ds)
        pass
    except:
        # Any other failure: log the traceback and report failure.
        VerboseLog(logInfo, traceback.format_exc())
        VerboseLog(logInfo, 'failed')
    VerboseLog(logInfo, "passed")
def main():
    """End-to-end Fault Tolerance (FT) test between two hosts.

    For each iteration: create a primary VM on the primary host, create and
    register a secondary VM on the secondary host, power on, perform the FT
    VMotion, fail over in both directions, then power off, unregister and
    (unless --keep) clean up.  Supports both uniprocessor ("up", record/
    replay) and SMP ("smp", checkpointing) FT.
    """
    supportedArgs = [
        # Fix: the "user" tuple below was corrupted ("user="******"root") —
        # restored to the same shape as the other argument tuples.
        (["P:", "primary host="], "localhost", "Primary host name",
         "primaryHost"),
        (["S:", "secondary host="], "localhost", "Secondary host name",
         "secondaryHost"),
        (["d:", "shared datastore name="], "storage1",
         "shared datastore name", "dsName"),
        (["k:", "keep="], "0", "Keep configs", "keep"),
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "vmname="], "vmFT", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
        (["t:", "FT type="], "up", "Type of fault tolerance [up|smp]",
         "ftType"),
    ]
    supportedToggles = [(["usage", "help"], False, "Show usage information",
                         "usage")]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)
    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    keep = int(args.GetKeyValue("keep"))
    dsName = args.GetKeyValue("dsName")
    primaryHost = args.GetKeyValue("primaryHost")
    secondaryHost = args.GetKeyValue("secondaryHost")
    ftType = args.GetKeyValue("ftType")
    numCPU = 2 if ftType == "smp" else 1
    memSize = 64
    for i in range(numiter):
        primaryVm = None
        primarySi = None
        secondarySi = None
        try:
            # Connect to primary host
            primarySi = SmartConnect(host=primaryHost,
                                     user=args.GetKeyValue("user"),
                                     pwd=args.GetKeyValue("pwd"))
            Log("Connected to Primary host")
            # Cleanup from previous runs
            try:
                CleanupVm(vmname)
            except vim.fault.InvalidOperationOnSecondaryVm:
                pass
            # Connect to secondary host
            secondarySi = SmartConnect(host=secondaryHost,
                                       user=args.GetKeyValue("user"),
                                       pwd=args.GetKeyValue("pwd"))
            Log("Connected to Secondary host")
            # Both hosts need FT-logging and vmotion vnics.
            for si in [primarySi, secondarySi]:
                if len(FindNicType(si, ftLoggingNicType)) == 0:
                    SelectVnic(si, "vmk0", ftLoggingNicType)
                if len(FindNicType(si, vmotionNicType)) == 0:
                    SelectVnic(si, "vmk0", vmotionNicType)
            ftMgrDst = host.GetFaultToleranceMgr(secondarySi)
            # Cleanup from previous runs
            CleanupVm(vmname)
            CleanupVm(vmname, True)
            connect.SetSi(primarySi)
            CleanupDir(dsName, vmname)
            if ftType == "smp":
                CleanupDir(dsName, "%s_shared" % vmname)
            # Create new VM
            Log("Creating primary VM " + vmname)
            primaryVm = vm.CreateQuickDummy(vmname,
                                            guest="winNetEnterpriseGuest",
                                            numScsiDisks=2,
                                            scrubDisks=True,
                                            memory=memSize,
                                            datastoreName=dsName)
            primaryUuid = primaryVm.GetConfig().GetInstanceUuid()
            primaryCfgPath = primaryVm.GetConfig().GetFiles().GetVmPathName()
            primaryDir = primaryCfgPath[:primaryCfgPath.rfind("/")]
            ftMetadataDir = GetSharedPath(primarySi, primaryVm)
            Log("Using VM : " + primaryVm.GetName() + " with instanceUuid " +
                primaryUuid)
            ftMetadataDir = GetSharedPath(primarySi, primaryVm)
            cSpec = vim.vm.ConfigSpec()
            if ftType != "smp":
                # Enable record/replay for the primaryVm
                # See PR 200254
                flags = vim.vm.FlagInfo(recordReplayEnabled=True)
                cSpec.SetFlags(flags)
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)
                Log("Enabled record/replay for Primary VM.")
                CheckFTState(
                    primaryVm,
                    vim.VirtualMachine.FaultToleranceState.notConfigured)
            else:
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
                cSpec.numCPUs = numCPU
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)
            # Create secondary VM
            connect.SetSi(secondarySi)
            Log("Creating secondary VM " + vmname)
            secondaryVm = vm.CreateQuickSecondary(vmname, primaryVm,
                                                  ftType=ftType,
                                                  scrubDisks=True,
                                                  numScsiDisks=2,
                                                  datastoreName=dsName,
                                                  ftMetadataDir=ftMetadataDir)
            if secondaryVm is None:
                # Fix: was a (illegal) string raise.
                raise Exception("Secondary VM creation failed")
            secondaryUuid = secondaryVm.GetConfig().GetInstanceUuid()
            secondaryCfgPath = secondaryVm.GetConfig().GetFiles(
            ).GetVmPathName()
            Log("Created secondary VM " + secondaryVm.GetName())
            Log("Secondry VM: instanceUuid " + secondaryUuid)
            Log("Secondary cfg path: " + secondaryCfgPath)
            ## Configure some additional config variables needed for FT
            ## This should eventually be done automatically at FT Vmotion time
            Log("Setting up extra config settings for the primary VM...")
            # Fix: was "vim.Vm.ConfigSpec()" — normalized to the
            # vim.vm.ConfigSpec spelling used everywhere else here.
            cSpec = vim.vm.ConfigSpec()
            extraCfgs = []
            if ftType == "smp":
                # some of these options are temporary
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.checkpointing)
                AddExtraConfig(extraCfgs, "ftcpt.maxDiskBufferPages", "0")
                AddExtraConfig(extraCfgs, "sched.mem.pshare.enable", "FALSE")
                AddExtraConfig(extraCfgs, "sched.mem.fullreservation", "TRUE")
                AddExtraConfig(extraCfgs,
                               "monitor_control.disable_mmu_largepages",
                               "TRUE")
                AddExtraConfig(extraCfgs, "sched.mem.min", memSize)
                AddExtraConfig(extraCfgs, "migration.dataTimeout", "2000")
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
            else:
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.recordReplay)
                AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")
            cSpec.SetExtraConfig(extraCfgs)
            WaitForTask(primaryVm.Reconfigure(cSpec))
            # Register secondary VM
            Log("Register secondary VM with the primary")
            ftMgr = host.GetFaultToleranceMgr(primarySi)
            connect.SetSi(primarySi)
            task = ftMgr.RegisterSecondary(primaryVm, secondaryUuid,
                                           secondaryCfgPath)
            WaitForTask(task)
            Log("Secondary VM registered successfully")
            # Verify FT role & state
            CheckFTRole(primaryVm, 1)
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.enabled)
            Log("FT configured successfully.")
            # PowerOn FT VM
            Log("Powering on Primary VM")
            vm.PowerOn(primaryVm)
            if ftType == "smp":
                # some of these options are temporary
                task = primaryVm.CreateSnapshot("snap-early",
                                                "before secondary starts",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)
            # Perform the FT VMotion
            Log("Calling StartSecondary on remote host...")
            primaryThumbprint = GetHostThumbprint(primaryHost)
            secondaryThumbprint = GetHostThumbprint(secondaryHost)
            Log("Primary thumbprint: %s" % primaryThumbprint)
            Log("Secondary thumbprint: %s" % secondaryThumbprint)
            # Each host must trust the other's SSL thumbprint for the FT
            # VMotion to be allowed.
            secondaryHostSystem = secondarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            sslThumbprintInfo = vim.host.SslThumbprintInfo(
                ownerTag='hostd-test', principal='vpxuser')
            sslThumbprintInfo.sslThumbprints = [primaryThumbprint]
            secondaryHostSystem.UpdateSslThumbprintInfo(
                sslThumbprintInfo, "add")
            sslThumbprintInfo.sslThumbprints = [secondaryThumbprint]
            primaryHostSystem = primarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            primaryHostSystem.UpdateSslThumbprintInfo(sslThumbprintInfo,
                                                      "add")
            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)
            Log("Start secondary done.")
            if ftType == "smp":
                # Verify snapshot is gone
                if primaryVm.snapshot is not None:
                    raise Exception("Snapshot still exists on primary")
                task = primaryVm.CreateSnapshot("snap",
                                                "without memory snapshot",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)
                if not primaryVm.snapshot or \
                        not primaryVm.snapshot.currentSnapshot:
                    raise Exception("Snapshot was not created")
                else:
                    Log("Snapshot %s exists as expected" %
                        primaryVm.snapshot.currentSnapshot)
            # Retrieve reference to new secondary VM
            connect.SetSi(secondarySi)
            secondaryVm = folder.FindCfg(secondaryCfgPath)
            connect.SetSi(primarySi)
            # FT state check
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)
            CheckFTState(secondaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)
            Log("Start secondary done.")
            # allows some time for FT to run and checkpoint before failing
            # over. This seems more necessary on nested VM environments
            # than physical
            time.sleep(20)
            Log("Failing over to the secondary.")
            WaitForTask(ftMgr.MakePrimary(primaryVm, secondaryUuid))
            WaitForPowerState(primaryVm, primarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(secondaryVm, FTState.needSecondary)
            Log("Starting secondary.")
            task = ftMgrDst.StartSecondaryOnRemoteHost(secondaryVm,
                                                       primaryCfgPath,
                                                       primaryHost, 80,
                                                       primaryThumbprint)
            WaitForTask(task)
            # Verify snapshot is gone
            if primaryVm.snapshot is not None:
                raise Exception("Snapshot still exists on old primary")
            Log("Failing over to the old-primary.")
            WaitForTask(ftMgrDst.MakePrimary(secondaryVm, secondaryUuid))
            WaitForPowerState(secondaryVm, secondarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(primaryVm, FTState.needSecondary)
            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)
            # PowerOff FT VMs
            Log("Power off Primary VM")
            vm.PowerOff(primaryVm)
            connect.SetSi(secondarySi)
            # Give the secondary up to ~10s to notice and power down.
            # Fix: the retry loop reused "i", clobbering the iteration index.
            for _attempt in range(10):
                if secondaryVm.GetRuntime().GetPowerState(
                ) == vim.VirtualMachine.PowerState.poweredOn:
                    time.sleep(1)
            if secondaryVm.GetRuntime().GetPowerState(
            ) == vim.VirtualMachine.PowerState.poweredOn:
                raise Exception("Secondary VM is still powered on!")
            Log("Verified secondary power state.")
            Log("Unregistering secondary VM " + vmname)
            ftMgrDst.Unregister(secondaryVm)
            # Cleanup
            if not keep:
                connect.SetSi(primarySi)
                CleanupVm(vmname)
                CleanupDir(dsName, vmname)
                if ftType == "smp":
                    CleanupDir(dsName, "%s_shared" % vmname)
                connect.SetSi(secondarySi)
                CleanupVm(vmname, True)
        except Exception as e:
            Log("Caught exception : %s" % e)
            stackTrace = " ".join(
                traceback.format_exception(sys.exc_info()[0],
                                           sys.exc_info()[1],
                                           sys.exc_info()[2]))
            Log(stackTrace)
            global status
            status = "FAIL"
            Disconnect(primarySi)
            Disconnect(secondarySi)
            return
        Disconnect(primarySi)
        Disconnect(secondarySi)
def main():
    """Command-line driver for the DMotion (relocate-while-powered-on) test.

    Creates a dummy VM on the source host, powers it on, relocates it to a
    suitable datastore on the destination host through VC, and optionally
    cleans up.  Repeats --repeat times, tolerating up to --maxfailures
    failures before re-raising.
    """
    global verbose
    # Process command line
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "bp:qs:d:uhv:c:n:m:",
            ["source=", "dest=", "vc=", "vm=", "datacenter=",
             "usage", "help", "cleanup", "verbose", "repeat=",
             "network=", "numnics=", "numdisks=", "disksize=", "attachcdrom",
             "maxfailures"])
    except getopt.GetoptError:
        Usage()
        sys.exit(-1)
    sourceHostName = None
    destHostName = None
    datacenterName = None
    vcName = None
    vmName = "DmotionTester"
    doCleanup = 0
    count = 1
    network = None
    numNics = 0
    numDisks = 1
    diskSize = 4
    attachCd = 0
    maxFailures = 0
    numFailures = 0
    for o, a in opts:
        if o in ("-s", "--source"):
            sourceHostName = a
        if o in ("-d", "--dest"):
            destHostName = a
        if o in ("-v", "--vc"):
            vcName = a
        if o in ("-c", "--datacenter"):
            datacenterName = a
        if o in ("-n", "--vm"):
            vmName = a
        if o in ("-q", "--cleanup"):
            doCleanup = 1
        if o in ("-b", "--verbose"):
            verbose = 1
            Log("Verbose level set")
        if o in ("-p", "--repeat"):
            count = int(a)
        if o == "--network":
            network = a
        if o == "--numnics":
            numNics = int(a)
        if o == "--numdisks":
            numDisks = int(a)
        if o == "--disksize":
            diskSize = int(a)
        if o == "--attachcdrom":
            attachCd = 1
        if o in ("-m", "--maxfailures"):
            maxFailures = int(a)
        if o in ("-u", "--usage", "-h", "--help"):
            Usage()
            sys.exit(0)
    if (None in (sourceHostName, destHostName, vcName, datacenterName)) \
            or (numNics > 0 and network is None):
        print("VC Host : %s" % vcName)
        print("Datacenter : %s" % datacenterName)
        print("Source host: %s" % sourceHostName)
        print("Dest host : %s" % destHostName)
        Usage()
        sys.exit(-1)
    # Connect
    Log("Connecting to VirtualCenter: " + vcName)
    si = Connect(vcName, 902, "Administrator", "ca$hc0w", "vpxd")
    atexit.register(Disconnect, si)
    # Assumes no folderization inside the datacenter root host folder.
    Log("Locating source and destination hosts: " + sourceHostName + ", " +
        destHostName)
    hosts = GetHostFolder(datacenterName)
    sourceCrRef = FindChild(hosts, sourceHostName)
    destCrRef = FindChild(hosts, destHostName)
    # Get the environment browser for the source host
    browser = sourceCrRef.GetEnvironmentBrowser()
    # Get the datastore list for the dest host
    destDsList = destCrRef.GetDatastore()
    finalDs = None
    # Find a reasonable destination datastore ( > 4GB free and vmfs 3)
    for candidateDs in destDsList:
        if (candidateDs.GetCapability().GetDirectoryHierarchySupported()):
            if (candidateDs.GetSummary().GetFreeSpace() >
                    4 * 1024 * 1024 * 1024):
                finalDs = candidateDs
    if finalDs is None:
        print("Failed to find a suitable datastore. quitting")
        sys.exit(-1)
    Log("Found a suitable destination datasource: " +
        finalDs.GetSummary().GetName())
    Log("Going to loop " + str(count) + " times")
    for i in range(0, count):
        # Create the virtual machine on source, power on the virtual machine
        # and issue a relocate.
        try:
            vmNameNew = vmName
            if (count > 1):
                vmNameNew = vmName + str(i)
            Log("Creating virtual machine with name: " + vmNameNew)
            vm1 = vm.CreateQuickDummy(vmNameNew, numDisks, numNics, attachCd,
                                      sourceCrRef.GetHost()[0],
                                      sourceCrRef.GetResourcePool(),
                                      browser, diskSize, network)
            Log("Powering on... ")
            vm.PowerOn(vm1)
            time.sleep(2)
            relocSpec = Vim.Vm.RelocateSpec()
            relocSpec.SetDatastore(finalDs)
            relocSpec.SetHost(destCrRef.GetHost()[0])
            relocSpec.SetPool(destCrRef.GetResourcePool())
            Log("Invoking Dmotion... ")
            vimutil.InvokeAndTrack(vm1.Relocate, relocSpec)
            time.sleep(2)
            if doCleanup == 1:
                Log("Cleanup requested: Powering off")
                vimutil.InvokeAndTrack(vm1.PowerOff)
                Log("Destroying the successfully vmotioned virtual machine")
                vm1.Destroy()
                time.sleep(2)
        except Exception as e:
            print("Failed test due to exception")
            print("Info: ")
            # Fix: "print(e.__str__)" printed the bound method object rather
            # than the exception message.
            print(e)
            if (numFailures >= maxFailures):
                raise
            else:
                numFailures = numFailures + 1
def testPromoteDisks(si, numDisks, numiter, backingType, vmxVersion, ds1,
                     ds2, status, resultsArray):
    """Exercise LLPM PromoteDisks on a linked-clone VM.

    For each iteration: creates a parent VM with numDisks on ds1, snapshots
    it, creates a child VM on ds2 whose disks are delta disks off the
    snapshot, powers both on, and promotes the child's disks via the
    low-level provisioning manager.  Appends the final PASS/FAIL status to
    resultsArray.
    """
    for i in range(numiter):
        bigClock = StopWatch()
        try:
            try:
                vm1Name = "Parent" + str(i)
                vm1 = folder.Find(vm1Name)
                if vm1 is not None:
                    Log("Cleaning up old vm with name: " + vm1Name)
                    vm1.Destroy()

                # Create a simple vm with numDisks on ds1
                vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=numDisks,
                                          datastoreName=ds1, diskSizeInMB=1,
                                          vmxVersion=vmxVersion,
                                          backingType=backingType)
                Log("Created parent VM1 --" + vm1Name)

                # Create snapshot
                vm.CreateSnapshot(vm1, "S1", "S1 is the first snaphost",
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Created Snapshot S1 for VM1")

                # Get the name of the parent disks
                disks = vmconfig.CheckDevice(S1Snapshot.GetConfig(),
                                             Vim.Vm.Device.VirtualDisk)
                if len(disks) != numDisks:
                    raise Exception("Failed to find parent disks")
                # FIX: the original inner loops reused 'i' as their index,
                # clobbering the iteration counter (so vm2Name and the
                # StopWatch label reported the wrong iteration).  Distinct
                # names / direct iteration are used throughout instead.
                parentDisks = [d.GetBacking().GetFileName() for d in disks]

                # Create a VM2 on ds2 that is linked off S1
                vm2Name = "LinkedClone" + str(i)
                configSpec = vmconfig.CreateDefaultSpec(name=vm2Name,
                                                        datastoreName=ds2)
                configSpec = vmconfig.AddScsiCtlr(configSpec)
                configSpec = vmconfig.AddScsiDisk(configSpec,
                                                  datastorename=ds2,
                                                  capacity=1024,
                                                  backingType=backingType)
                configSpec.SetVersion(vmxVersion)
                # Device change [1] is the disk added above; link it to the
                # first parent disk as a redo-log delta disk.
                childDiskBacking = configSpec.GetDeviceChange()[1].\
                    GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisks[0])
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat("redoLogFormat")

                resPool = invt.GetResourcePool()
                vmFolder = invt.GetVmFolder()
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
                vm2 = folder.Find(vm2Name)
                Log("Created child VM2 --" + vm2Name)

                # create delta disks off VM1 on VM2
                Log("Adding delta disks off VM1 to VM2")
                configSpec = Vim.Vm.ConfigSpec()
                for diskIdx, parentDisk in enumerate(parentDisks):
                    configSpec = vmconfig.AddScsiDisk(configSpec,
                                                      datastorename=ds2,
                                                      cfgInfo=vm2.GetConfig(),
                                                      backingType=backingType)
                    SetDeltaDiskBacking(configSpec, diskIdx, parentDisk)
                vimutil.InvokeAndTrack(vm2.Reconfigure, configSpec)

                Log("Power (on) vm1")
                vm.PowerOn(vm1)
                time.sleep(5)
                Log("Power (on) vm2")
                vm.PowerOn(vm2)
                time.sleep(5)

                # prepare promoteDisksSpec: one spec per child disk,
                # promoting a single link from the bottom of the chain.
                diskList = GetVirtualDisks(vm2)
                promoteDisksSpec = []
                for disk in diskList:
                    spec = vim.host.LowLevelProvisioningManager.\
                        PromoteDisksSpec()
                    spec.SetNumLinks(1)
                    spec.SetOffsetFromBottom(0)
                    spec.SetDiskId(disk.GetKey())
                    promoteDisksSpec.append(spec)

                Log("Calling LLPM PromoteDisks")
                llpm = invt.GetLLPM()
                try:
                    task = llpm.PromoteDisks(vm2, promoteDisksSpec)
                    WaitForTask(task)
                except Exception as e:
                    print(e)
                    Log("Caught exception : " + str(e))
                    status = "FAIL"
                else:
                    # FIX: 'status = "PASS"' used to run unconditionally
                    # after the try/except, overwriting a FAIL set in the
                    # handler; it now runs only on success.
                    status = "PASS"

                Log("Destroying VMs")
                vm.PowerOff(vm2)
                time.sleep(5)
                vm.PowerOff(vm1)
                time.sleep(5)
                vm2.Destroy()
                vm1.Destroy()
            finally:
                bigClock.finish("iteration " + str(i))
        except Exception as e:
            Log("Caught exception : " + str(e))
            status = "FAIL"
    Log("TEST RUN COMPLETE: " + status)
    resultsArray.append(status)
    Log("Results for each iteration: ")
    for iterIdx, result in enumerate(resultsArray):
        Log("Iteration " + str(iterIdx) + ": " + result)
def main():
    """Eagerly-scrubbed disk reconfigure test.

    Creates a dummy VM with one 4GB disk, marks its existing disks
    eagerlyScrub via Reconfigure, then builds (but deliberately does not
    apply) a spec adding a new pre-scrubbed 128MB disk.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): this tuple was garbled in the original dump;
        # reconstructed to match the shape of its siblings.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "Hw7ReconfigTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            ## Cleanup old VMs
            vm1 = folder.Find(vmname)
            if vm1 is not None:
                vm1.Destroy()

            Log("Creating virtual machine")
            vm1 = vm.CreateQuickDummy(vmname, 1, diskSizeInMB=4096)
            devices = vmconfig.CheckDevice(vm1.GetConfig(),
                                           Vim.Vm.Device.VirtualDisk)
            if len(devices) < 1:
                raise Exception("Failed to find added disk!")

            cspec = Vim.Vm.ConfigSpec()
            # FIX: the disk loop reused 'i', clobbering the outer iteration
            # counter; iterate the device list directly instead.
            for disk in devices:
                backing = disk.GetBacking()
                backing.SetEagerlyScrub(True)
                disk.SetBacking(backing)
                vmconfig.AddDeviceToSpec(
                    cspec, disk,
                    Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            Log("Scrubbing existing disks of the VM")
            task = vm1.Reconfigure(cspec)
            WaitForTask(task)

            Log("Add a new scrubbed disk to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddScsiCtlr(cspec)
            vmconfig.AddScsiDisk(cspec, capacity=128 * 1024, scrub=True)
            # NOTE(review): applying the new-disk spec was already commented
            # out in the original; left disabled to preserve behavior.
            #task = vm1.Reconfigure(cspec)
            #WaitForTask(task)
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def main():
    """USB controller hot-plug test on hwVersion 7 and 8 VMs.

    Picks the first host USB device available for passthrough, then runs
    plug/unplug tests against a UHCI controller on a vmx-07 VM and against
    both xHCI and UHCI controllers on a vmx-08 VM.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): this tuple was garbled in the original dump;
        # reconstructed to match the shape of its siblings.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "HotPlugTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    # Find a USB device on the host to passthrough
    envBrowser = invt.GetEnv()
    cfgTarget = envBrowser.QueryConfigTarget(None)
    if len(cfgTarget.usb) < 1:
        Log("No USB devices available for passthrough on " +
            args.GetKeyValue("host"))
        return
    device = cfgTarget.usb[0]

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            vmname7 = vmname + "_HwV7"
            vmname8 = vmname + "_HwV8"
            Log("Cleaning up VMs from previous runs...")
            vm.Delete(vmname7, True)
            vm.Delete(vmname8, True)

            ## Positive tests on a hwVersion 7 VM
            Log("Creating Hw7 VM..")
            vm1 = vm.CreateQuickDummy(vmname7, vmxVersion="vmx-07",
                                      memory=4, guest="rhel5Guest")
            Log("Add a new USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBCtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            DoPlugTests(vm1, device, Vim.Vm.Device.VirtualUSBController, True)
            vm.Delete(vm1.name, True)

            ## Positive tests on a hwVersion 8 VM
            Log("Creating Hw8 VM..")
            vm1 = vm.CreateQuickDummy(vmname8, vmxVersion="vmx-08",
                                      memory=4, guest="rhel5Guest")
            Log("Add a new xHCI USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBXHCICtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            xhciCtlr = CheckDevice(vm1,
                                   Vim.Vm.Device.VirtualUSBXHCIController,
                                   "xHCI controller")
            DoPlugTests(vm1, device,
                        Vim.Vm.Device.VirtualUSBXHCIController, True)

            Log("Add a new USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBCtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            usbCtlr = CheckDevice(vm1, Vim.Vm.Device.VirtualUSBController,
                                  "USB controller")
            DoPlugTests(vm1, device, Vim.Vm.Device.VirtualUSBController, True)

            Log("Remove xHCI USB controller from the VM")
            cspec = vmconfig.RemoveDeviceFromSpec(Vim.Vm.ConfigSpec(),
                                                  xhciCtlr)
            vm.Reconfigure(vm1, cspec)
            CheckNoDevice(vm1, Vim.Vm.Device.VirtualUSBXHCIController,
                          "xHCI controller")

            Log("Remove USB controller from the VM")
            cspec = vmconfig.RemoveDeviceFromSpec(Vim.Vm.ConfigSpec(),
                                                  usbCtlr)
            vm.Reconfigure(vm1, cspec)
            CheckNoDevice(vm1, Vim.Vm.Device.VirtualUSBController,
                          "USB controller")
            vm.Delete(vm1.name, True)

            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def main():
    """Serial port reconfigure test.

    Creates a dummy VM and adds serial ports with device, file, pipe and
    URI backings, then deletes the VM.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): this tuple was garbled in the original dump;
        # reconstructed to match the shape of its siblings.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "SerialPortTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    # FIX: the loop used Python-2-only 'xrange', which raises NameError on
    # Python 3 (the rest of the file uses Python 3 syntax).
    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            Log("Cleaning up VMs from previous runs...")
            vm.Delete(vmname, True)
            Log("Creating VM..")
            vm1 = vm.CreateQuickDummy(vmname)

            Log("Adding serial port with device backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.DeviceBackingInfo)
            Log("Adding serial port with file backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.FileBackingInfo)
            Log("Adding serial port with pipe backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.PipeBackingInfo)
            Log("Adding serial port with URI backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.URIBackingInfo)

            Log("Deleting VM..")
            vm.Delete(vmname, True)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def setUp(self):
    """Create the dummy VM used by the base power-operation tests."""
    # All tests in this fixture run against the same hard-coded host.
    Connect("jairam-esx")
    unique_name = folder.FindUniqueName(self.vmname)
    self.vmname = unique_name
    self.v1 = vm.CreateQuickDummy(unique_name, 1)
def main():
    """vTPM device test on an encrypted hardware-version-14 VM.

    Enables host crypto, registers a VM encryption key, then (1) creates an
    encrypted Hw14 VM with a vTPM from a spec and checks its properties,
    and (2) creates a plain Hw14 VM, encrypts it, and runs the vTPM
    reconfigure/removal tests.  Cleans up the VM unless --nodelete is set.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): this tuple was garbled in the original dump;
        # reconstructed to match the shape of its siblings.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "vmname="], "VTPMTest",
         "Name of the virtual machine", "vmname")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"
    vm1 = None
    try:
        Log("Cleaning up VMs from previous runs...")
        vm.Delete(vmname, True)

        Log("Prepare host for crypto...")
        vimcrypto.CryptoEnableHost(host.GetHostSystem(si))

        Log("Adding key for encrypted VM...")
        cryptoMgr = si.RetrieveContent().cryptoManager
        keyId, cryptoKey = CreateKeyForVM(si)
        cryptoMgr.AddKeys([cryptoKey])

        ## vTPM requires hardware version 14.
        Log("Creating Hw14 VM...")
        cfg = vm.CreateQuickDummySpec(vmname, vmxVersion="vmx-14",
                                      memory=4, guest="otherGuest")
        cryptoSpec = vimcrypto.CreateCryptoSpecEncrypt(keyId, None)
        cfg.crypto = cryptoSpec
        AddVTPM(cfg)
        vm1 = vm.CreateFromSpec(cfg)
        TestVTPMProps(vm1)
        task = vm1.Destroy()
        WaitForTask(task)
        vm1 = None

        # Second pass: create unencrypted, then encrypt in place.
        vm1 = vm.CreateQuickDummy(vmname, vmxVersion="vmx-14",
                                  memory=4, guest="otherGuest")
        Log("Encrypting VM...")
        vimcrypto.EncryptVM(vm1, keyId, None, disks=False)
        TestVTPMReconfig(vm1)
        TestNoVTPM(vm1)
        TestVTPMVDRemove(vm1)
        Log("Tests completed.")
    except Exception as e:
        status = "FAIL"
        Log("Caught exception : %s, %r" % (e, e))
        raise
    finally:
        # Delete the vm as cleanup
        if not noDelete:
            if vm1 is not None:
                task = vm1.Destroy()
                WaitForTask(task)
                vm1 = None
        Log("TEST RUN COMPLETE: " + status)
def main():
    """CreateScreenshot API test.

    Verifies that CreateScreenshot fails on a powered-off and on a
    suspended VM (negative cases), and succeeds repeatedly on a powered-on
    VM, then destroys the VM.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): this tuple was garbled in the original dump;
        # reconstructed to match the shape of its siblings.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "CreateScreenshot-VM",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    status = "PASS"

    for i in range(numiter):
        # Cleanup from previous runs
        vm1 = folder.Find(vmname)
        if vm1 is not None:
            vm1.Destroy()
        # Create new VM
        vm1 = vm.CreateQuickDummy(vmname, guest="winXPProGuest")
        print("Using VM : " + vm1.GetName())

        try:
            # CreateScreenshot when VM is powered off.
            # FIX: the negative cases used to 'return' immediately on
            # failure, which skipped the final "Test status" report
            # entirely; raising lets the outer handler record FAIL and the
            # loop break, so the status line is always printed.
            print("Attempt to CreateScreenshot for a powered-off VM")
            try:
                vm.CreateScreenshot(vm1)
                raise Exception(
                    "CreateScreenshot succeeded on a powered-off VM")
            except Exception as e:
                print("Verified negative test case and got an exception")
                print("Caught exception : " + str(e))

            print("Powering on the VM...")
            vm.PowerOn(vm1)

            # CreateScreenshot when VM is powered on
            print("Attempt to CreateScreenshot for a powered-on VM")
            # FIX: this loop reused 'i', shadowing the iteration counter.
            for _ in range(10):
                task = vm1.CreateScreenshot()
                WaitForTask(task)
                screenshotPath = task.GetInfo().GetResult()
                print("The datastore path of the screenshot is: " +
                      screenshotPath)

            print("Suspending the VM...")
            vm.Suspend(vm1)

            # CreateScreenshot when VM is suspended
            print("Attempt to CreateScreenshot for a suspended VM")
            try:
                vm.CreateScreenshot(vm1)
                raise Exception(
                    "CreateScreenshot succeeded on a suspended VM")
            except Exception as e:
                print("Verified negative test case and got an exception")
                print("Caught exception : " + str(e))

            # Delete the VM and check whether the screenshot files are deleted
            print("Deleting the VM...")
            delTask = vm1.Destroy()
            WaitForTask(delTask)
        except Exception as e:
            print("Caught exception : " + str(e))
            status = "FAIL"
        if status == "FAIL":
            break
    print("Test status : " + str(status))
    return