def DeltaTest(dc, dsName, host, backingType, deltaDiskFormat, diskSize, vmx,
              positive, test):
    msg = test + "positive=" + str(positive) + '; ' + \
          "backing=" + backingType + '; ' + \
          "delta=" + str(deltaDiskFormat) + '; ' + \
          "vmx version=" + str(vmx) + '; ' + \
          'diskSize=' + str(diskSize) + '; ' + 'result='
    vm1 = None
    snapshotVm = None
    try:
        spec = CreateVmSpec(backingType + "Vm", dsName, backingType,
                            diskSize, vmx)
        vm1 = VmTest(spec, dc, host, remove=False)
        vm.CreateSnapshot(vm1, "S1", "snapshot of vm1", False, False)
        snapshot = vm1.GetSnapshot().GetCurrentSnapshot()
        disks = vmconfig.CheckDevice(snapshot.GetConfig(),
                                     Vim.Vm.Device.VirtualDisk)
        if len(disks) != 1:
            print(snapshot.GetConfig())
            raise AssertionError("Did not find the expected number of "
                                 "snapshot disks")
        parentDisk = disks[0].GetBacking().GetFileName()

        # Build a child VM whose disk is parented on the snapshot's disk.
        spec = CreateVmSpec(backingType + "Vm-Clone", dsName, backingType,
                            diskSize, vmx)
        childDiskBacking = spec.GetDeviceChange()[1].GetDevice().GetBacking()
        if backingType == 'flat':
            parentBacking = Vim.Vm.Device.VirtualDisk.FlatVer2BackingInfo()
        if backingType == 'seSparse':
            parentBacking = Vim.Vm.Device.VirtualDisk.SeSparseBackingInfo()
        parentBacking.SetFileName(parentDisk)
        childDiskBacking.SetParent(parentBacking)
        childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)
        snapshotVm = VmTest(spec, dc, host, remove=False)
    except Vmodl.MethodFault as e:
        if not positive:
            print(msg + 'SUCCESS')
            print(e)
        else:
            print(msg + "FAILURE")
            raise
    except Exception:
        print(msg + "FAILURE")
        raise
    else:
        if positive:
            print(msg + 'SUCCESS')
        else:
            print(msg + "FAILURE, negative test passed through")
            raise AssertionError(msg + "FAILURE, negative test passed through")
    finally:
        if vm1:
            vm.Destroy(vm1)
        if snapshotVm:
            vm.Destroy(snapshotVm)
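
# Usage sketch (hypothetical, not part of the original suite): drives
# DeltaTest over the stock VirtualDiskDeltaDiskFormat enum values for a flat
# parent disk. The dc/host/datastore arguments are assumed to come from the
# caller's setup, and which combinations count as "positive" depends on the
# backing type and host support, so treat the expectations below as
# placeholders.
def RunDeltaTests(dc, host, dsName):
    for fmt, ok in [("redoLogFormat", True), ("seSparseFormat", False)]:
        DeltaTest(dc, dsName, host, backingType="flat", deltaDiskFormat=fmt,
                  diskSize=1024, vmx="vmx-08", positive=ok,
                  test="DeltaTest: ")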
def testEditDisk(options):
    name = getUniqueVmName()
    machine = folder.Find(name)
    if machine:
        vm.Destroy(machine)
    machine = vm.CreateQuickDummy(name, datastoreName=options.datastore,
                                  scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (name, options.datastore))
    addFlatDisk(options, machine, shared=False)
    diskDev = vmconfig.CheckDevice(machine.config, VirtualDisk)[0]
    editDisk(options, machine, diskDev, shared=True)
    vm.Destroy(machine)
def cleanup():
    # Power off and destroy every VM created by the test; errors are ignored
    # per VM so one failure does not prevent cleanup of the rest.
    for vm1 in vmRefs:
        try:
            vm.PowerOff(vm1)
            vm.Destroy(vm1)
        except Exception:
            pass
def VmTest(spec, dc, host, resPool, remove=True):
    print("vm resource pool is " + str(resPool))
    thisVm = vm.CreateFromSpec(spec, dc.GetName(), host, resPool)
    if remove:
        vm.Destroy(thisVm)
    else:
        return thisVm
def cleanup(dvsUuidList):
    """Remove the DVSes created during the setup phase.

    Assumes no clients are connected.
    """
    if options.nocleanup:
        print("Not doing cleanup as requested")
        return
    vm1 = folder.Find(options.vmName)
    if vm1 is not None:
        try:
            vm.PowerOff(vm1)
        except Exception:
            pass
        vm.Destroy(vm1)
    dvsManager = si.RetrieveInternalContent().hostDistributedVirtualSwitchManager
    # Guard against a None list; iterating it directly would raise TypeError.
    if dvsUuidList is not None:
        for dvsUuid in dvsUuidList:
            try:
                dvsManager.RemoveDistributedVirtualSwitch(dvsUuid)
            except Exception as e:
                print(e)
        del dvsUuidList[:]
    try:
        dvsManager.RemoveDistributedVirtualSwitch(options.uuid)
    except Exception as e:
        print(e)
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "",
                      "VM prefix (deletes everything by default)", "vmname")]
    supportedToggles = [(["usage", "help"], False,
                         "Show usage information", "usage")]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"))

    # Process command line; the loop variable must not shadow the vm module.
    vmname = args.GetKeyValue("vmname")
    vms = folder.FindPrefix(vmname)
    for vm1 in vms:
        try:
            vm1.Destroy()
        except Exception:
            raise
def CleanupVm(self, vmname):
    oldVms = folder.FindPrefix(vmname)
    for oldVm in oldVms:
        if oldVm.GetRuntime().GetPowerState() == \
                Vim.VirtualMachine.PowerState.poweredOn:
            vm.PowerOff(oldVm)
        vm.Destroy(oldVm)
def CleanupVm(vmname, useLlpm=False):
    with LogSelfOp() as logOp:
        if isinstance(vmname, vim.VirtualMachine):
            Log("Cleaning up VMs with name %s" % vmname.name)
            oldVms = [vmname]
        else:
            Log("Cleaning up VMs with name %s" % vmname)
            oldVms = folder.FindPrefix(vmname)
        for oldVm in oldVms:
            if oldVm.GetRuntime().GetPowerState() == PowerState.poweredOn:
                vm.PowerOff(oldVm)
            ftInfo = oldVm.config.ftInfo
            if ftInfo and ftInfo.role == 1:
                # If the VM is a primary, unregister all secondaries
                # before deleting the VM.
                ftMgr = host.GetFaultToleranceMgr(connect.GetSi())
                WaitForTask(ftMgr.UnregisterSecondary(oldVm, None))
            Log("Destroying VM")
            if useLlpm:
                vmConfig = oldVm.GetConfig()
                hw = vmConfig.GetHardware()
                if vmConfig.flags.faultToleranceType and \
                   vmConfig.flags.faultToleranceType == "recordReplay":
                    hw.SetDevice([])
                    vmConfig.SetHardware(hw)
                llpm = invt.GetLLPM()
                llpm.DeleteVm(vmConfig)
            else:
                vm.Destroy(oldVm)
def TestVQATDestroyVM(vm):
    """ Destroy a VM. """
    Log("Destroying VM")
    task = vm.Destroy()
    WaitForTask(task)
def RunCreateTest(name):
    vm1 = vm.CreateQuickDummy(name)
    vm.PowerOn(vm1)
    vm.PowerOff(vm1)
    cfgPath = vm1.GetConfig().GetFiles().GetVmPathName()
    vm1.Unregister()
    folder.Register(cfgPath)
    vm.Destroy(vm1)
def TestVmMigrate(self):
    self.banner(self.TestVmMigrate)
    if len(self._hosts) <= 1:
        VerboseLog(logInfo, "not enough hosts..skipping")
        return
    vmname = "test_migrate_vvol_vm"
    self.CleanupVm(vmname)
    host1 = self._hosts[0]
    host2 = self._hosts[1]
    scId = self._sc
    spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
    spec.SetScId(scId)
    spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
    ret = True
    try:
        VerboseLog(logTrivia, "{ Creating bulk: ")
        create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
        task.WaitForTask(create_task)
        VerboseLog(logVerbose, create_task.info.result)
        for result in create_task.info.result:
            if result.result == 'fail':
                VerboseLog(logInfo, "create failed for host " + result.hostKey)
                raise Exception("unexpected failure")
        ds = create_task.info.result[0].ds
        testvm = vm.CreateQuickDummy(vmname, host=host1, datastoreName=ds.name,
                                     dc=self._dc.name, numScsiDisks=1,
                                     memory=12)
        vm.PowerOn(testvm)
        migrate_task = testvm.Migrate(
            host2.parent.resourcePool, host2,
            Vim.VirtualMachine.MovePriority.highPriority, None)
        task.WaitForTask(migrate_task)
        vm.PowerOff(testvm)
        vm.Destroy(testvm)
        VerboseLog(logTrivia, "{ Removing bulk: ")
        delete_task = self._vasaMgr.RemoveVVolDatastore(ds, self._hosts)
        task.WaitForTask(delete_task)
        VerboseLog(logVerbose, delete_task.info.result)
        for result in delete_task.info.result:
            if result.result == 'fail':
                VerboseLog(logInfo, "remove failed for host " + result.hostKey)
                raise Exception("unexpected failure in bulk remove")
    except Exception:
        VerboseLog(logTrivia, traceback.format_exc())
        ret = False
    VerboseLog(logInfo, "passed" if ret else "failed")
def testKeyOnlyRemove(si, vmxVersion, ds):
    """This test verifies that it is possible to remove devices by passing
    a VirtualDevice object with only key specified. This is a legacy
    behavior present in Foundry, supported only for compatibility reasons.
    It is not recommended to use this functionality in any products."""
    suffix = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(8))
    vm1Name = '-'.join(['KeyOnlyRemove', suffix])
    print('Creating %s VM on %s' % (vm1Name, ds))
    task.WaitForTasks(
        [vm1.Destroy() for vm1 in folder.GetVmAll() if vm1.name == vm1Name])
    vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=1, numIdeDisks=1,
                              diskSizeInMB=1, nic=1, cdrom=1,
                              datastoreName=ds, vmxVersion=vmxVersion)
    print('Testing device removal via VirtualDevice with key set on VM %s' %
          vm1Name)
    # Gather all the devices we want to remove.
    devices = [
        vim.vm.device.VirtualDevice(key=d.key)
        for d in vm1.config.hardware.device
        if isinstance(d, (vim.vm.device.VirtualEthernetCard,
                          vim.vm.device.VirtualDisk,
                          vim.vm.device.VirtualCdrom))
    ]
    # Prepare a config spec containing VirtualDevice "abstract" objects with
    # the keys we want to remove.
    cspec = vim.vm.ConfigSpec()
    for device in devices:
        vmconfig.AddDeviceToSpec(
            cspec, device, vim.vm.device.VirtualDeviceSpec.Operation.remove)
    # Reconfigure the VM.
    task.WaitForTask(vm1.Reconfigure(cspec))
    # Verify that the devices are removed.
    devices = [
        vim.vm.device.VirtualDevice(key=d.key)
        for d in vm1.config.hardware.device
        if isinstance(d, (vim.vm.device.VirtualEthernetCard,
                          vim.vm.device.VirtualDisk,
                          vim.vm.device.VirtualCdrom))
    ]
    if len(devices) != 0:
        raise Exception("Not all devices were deleted!")
    # Destroy the VM.
    print('Done testing, destroying %s VM' % vm1Name)
    vm.Destroy(vm1)
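
# Hypothetical driver for testKeyOnlyRemove, assuming reachable credentials
# and a datastore named "datastore1"; SmartConnect/Disconnect are the stock
# pyVim.connect helpers.
if __name__ == "__main__":
    import atexit
    from pyVim.connect import SmartConnect, Disconnect
    si = SmartConnect(host="localhost", user="root", pwd="<password>")
    atexit.register(Disconnect, si)
    testKeyOnlyRemove(si, "vmx-08", "datastore1")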
def VmTest(spec, dc, host, remove=True):
    if not host:
        resPool = destResPool
    else:
        resPool = host.GetParent().GetResourcePool()
    print("vm resource pool is " + str(resPool))
    thisVm = vm.CreateFromSpec(spec, dc.GetName(), host, resPool)
    if remove:
        vm.Destroy(thisVm)
    else:
        return thisVm
def cleanupvm(vmName):
    print("cleaning up vm:'" + vmName + "'")
    vm1 = folder.Find(vmName)
    if vm1 is not None:
        try:
            vm.PowerOff(vm1)
        except Exception as e:
            print(e)
        try:
            vm.Destroy(vm1)
        except Exception as e:
            print(e)
def testAddDisk(options, online, shared):
    name = getUniqueVmName()
    machine = folder.Find(name)
    if machine:
        vm.Destroy(machine)
    machine = vm.CreateQuickDummy(name, datastoreName=options.datastore,
                                  scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (name, options.datastore))
    if online:
        vm.PowerOn(machine)
        Log("PowerOn(%s)" % machine.name)
    addFlatDisk(options, machine, shared)
    addRdmDisk(options, machine, shared)
    if online:
        vm.PowerOff(machine)
        Log("PowerOff(%s)" % machine.name)
    vm.Destroy(machine)
def main():
    # Process command line
    host = "jairam-esx"
    if len(sys.argv) > 1:
        host = sys.argv[1]
    try:
        si = Connect(host)
        atexit.register(Disconnect, si)
        vm.CreateQuickDummy("CpuIdTest")
        v1 = folder.Find("CpuIdTest")
        print("Created a dummy")

        # Print the current CPU feature mask.
        print(v1.GetConfig().GetCpuFeatureMask())

        # Change levels 0 and 1.
        config = Vim.Vm.ConfigSpec()
        lvl0 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info = Vim.Host.CpuIdInfo()
        info.SetLevel(0)
        info.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEbx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEcx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl0.SetOperation("add")
        lvl0.SetInfo(info)
        lvl1 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info2 = Vim.Host.CpuIdInfo()
        info2.SetLevel(1)
        info2.SetVendor("amd")
        info2.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info2.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl1.SetOperation("add")
        lvl1.SetInfo(info2)
        config.SetCpuFeatureMask([lvl0, lvl1])
        print("Assigned features")

        task = v1.Reconfigure(config)
        if WaitForTask(task) == "error":
            raise task.GetInfo().GetError()
        vm.Destroy(v1)
    except Exception as e:
        print("Failed test due to exception: %s" % e)
        raise
def RelocateTest(srcHost, dc, dsName, host, backingType, deltaDiskFormat,
                 diskSize, vmx, positive, test):
    msg = test + "positive=" + str(positive) + '; ' + \
          "backing=" + backingType + '; ' + \
          "delta=" + str(deltaDiskFormat) + '; ' + \
          "vmx version=" + str(vmx) + '; ' + \
          'diskSize=' + str(diskSize) + '; ' + 'result='
    vm1 = None
    try:
        spec = CreateVmSpec(backingType + "Vm", dsName, backingType,
                            diskSize, vmx)
        vm1 = VmTest(spec, dc, srcHost, remove=False)
        vm.PowerOn(vm1)
        if not host:
            resPool = destResPool
        else:
            resPool = host.GetParent().GetResourcePool()
        print("Vm migrate dest resource pool is " + str(resPool))
        vimutil.InvokeAndTrack(vm1.Migrate, resPool, host, "defaultPriority")
        if str(vm1.GetResourcePool()) != str(resPool):
            raise AssertionError(msg + "FAILURE, wrong place " +
                                 str(vm1.GetResourcePool()) +
                                 "; expected " + str(resPool))
        '''
        relocSpec = Vim.Vm.RelocateSpec()
        relocSpec.SetPool(resPool)
        relocSpec.SetHost(host)
        vimutil.InvokeAndTrack(vm1.Relocate, relocSpec)
        '''
    except Vmodl.MethodFault as e:
        if not positive:
            print(msg + 'SUCCESS')
            print(e)
        else:
            print(msg + "FAILURE")
            raise
    except Exception:
        print(msg + "FAILURE")
        raise
    else:
        if positive:
            print(msg + 'SUCCESS')
        else:
            print(msg + "FAILURE, negative test passed through")
            raise AssertionError(msg + "FAILURE, negative test passed through")
    finally:
        if vm1:
            vm.Destroy(vm1)
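
# Usage sketch (hypothetical): a single positive relocate between two hosts
# that share the datastore. `dc`, `hostA`, `hostB`, and the datastore name
# are assumed to come from the surrounding setup; deltaDiskFormat is not used
# by the relocate path above beyond being logged.
def RunRelocateTests(dc, hostA, hostB, dsName):
    RelocateTest(hostA, dc, dsName, hostB, backingType="flat",
                 deltaDiskFormat=None, diskSize=1024, vmx="vmx-08",
                 positive=True, test="RelocateTest: ")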
def CleanupVm(vmname, useLlpm=False):
    Log("Cleaning up VMs with name " + vmname)
    oldVms = folder.FindPrefix(vmname)
    for oldVm in oldVms:
        if oldVm.GetRuntime().GetPowerState() == \
                Vim.VirtualMachine.PowerState.poweredOn:
            vm.PowerOff(oldVm)
        Log("Destroying VM")
        if useLlpm:
            vmConfig = oldVm.GetConfig()
            hw = vmConfig.GetHardware()
            hw.SetDevice([])
            vmConfig.SetHardware(hw)
            llpm = invt.GetLLPM()
            llpm.DeleteVm(vmConfig)
        else:
            vm.Destroy(oldVm)
def CleanupVm(vmname, useLlpm=False):
    if isinstance(vmname, vim.VirtualMachine):
        Log("Cleaning up VMs with name %s" % vmname.name)
        oldVms = [vmname]
    else:
        Log("Cleaning up VMs with name %s" % vmname)
        oldVms = folder.FindPrefix(vmname)
    for oldVm in oldVms:
        if oldVm.GetRuntime().GetPowerState() == PowerState.poweredOn:
            vm.PowerOff(oldVm)
        Log("Destroying VM")
        if useLlpm:
            vmConfig = oldVm.GetConfig()
            llpm = invt.GetLLPM()
            llpm.DeleteVm(vmConfig)
        else:
            vm.Destroy(oldVm)
def off(self):
    try:
        if self.vm is None:
            self.success = False
            print(self.getClientId() + " off failed")
            return
        print("Removing snapshot")
        vm.RemoveAllSnapshots(self.vm)
        print("Powering off vm")
        vm.PowerOff(self.vm)
        print("Removing VM disk")
        self.delDisk()
        print("Destroying VM")
        vm.Destroy(self.vm)
        self.success = True
        print(self.getClientId() + " off successful")
    except Exception:
        self.success = False
        print(self.getClientId() + " off failed")
def CleanupVm(vmname):
    si = connect.GetSi()
    Log("Cleaning up VMs with name " + vmname)
    oldVms = folder.FindPrefix(vmname)
    for oldVm in oldVms:
        try:
            if oldVm is None or oldVm.config is None or \
               oldVm.config.ftInfo is None:
                continue
            if oldVm.config.ftInfo.role != 1:
                continue
            if oldVm.GetRuntime().GetPowerState() == \
                    vim.VirtualMachine.PowerState.poweredOn:
                vm.PowerOff(oldVm)
            ftInfo = oldVm.config.ftInfo
            Log("Destroying VM")
            vmConfig = oldVm.GetConfig()
            hw = vmConfig.GetHardware()
            hw.SetDevice([])
            vmConfig.SetHardware(hw)
            vm.Destroy(oldVm)
        except vmodl.fault.ManagedObjectNotFound:
            pass
def TestRemoveVvolDsWithVms(self):
    self.banner(self.TestRemoveVvolDsWithVms)
    VerboseLog(logTrivia, self._host)
    scId = self._sc
    vmname = "vvoldummy"
    self.CleanupExistingTestDatastores()
    self.CleanupVm(vmname)
    spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
    spec.SetScId(scId)
    spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
    testvm = None
    ds = None
    try:
        ds = self.CreateDs(spec)
        testvm = vm.CreateQuickDummy(vmname, host=self._host,
                                     datastoreName=ds.name, dc=self._dc.name,
                                     numScsiDisks=1, memory=10)
        self.removeDs(ds)
    except Vim.Fault.ResourceInUse:
        # Removing a datastore that still hosts a VM is expected to fail.
        if testvm is not None:
            vm.Destroy(testvm)
        if ds is not None:
            self.removeDs(ds)
    except Exception:
        VerboseLog(logInfo, traceback.format_exc())
        VerboseLog(logInfo, 'failed')
        return
    VerboseLog(logInfo, "passed")
def cleanup(si, options, force=False):
    '''Try to remove everything possible.

    General cleanup that can be called after every test. This should not
    throw or fail.
    '''
    if options.nocleanup and not force:
        print("Not doing cleanup as requested")
        return
    vm1 = folder.Find(options.vmName)
    if vm1 is not None:
        try:
            vm.PowerOff(vm1)
        except Exception:
            pass
        vm.Destroy(vm1)
    networkSystem = host.GetHostSystem(si).GetConfigManager().networkSystem
    try:
        networkSystem.RemoveVirtualSwitch(vswitchName=options.vsName)
    except Exception:
        pass
def RunTests(argv):
    # Can't use the built-in help option because -h is used for hostname.
    parser = OptionParser(add_help_option=False)
    parser.add_option('-h', '--host', dest='host', help='Host name',
                      default='localhost')
    parser.add_option('-u', '--user', dest='user', help='User name',
                      default='root')
    parser.add_option('-p', '--pwd', dest='pwd', help='Password')
    parser.add_option('-o', '--port', dest='port', help='Port',
                      default=443, type='int')
    parser.add_option('-v', '--vmname', dest='vmname',
                      help='temporary vm name', default='test-more-devices')
    parser.add_option('-n', '--numdisks', dest='numdisks',
                      help='number of disks on each controller',
                      default=27, type='int')
    parser.add_option('-c', '--numctlrs', dest='numCtlrs',
                      help='number of controllers', default=1, type='int')
    parser.add_option('-d', '--datastore', dest='datastore', help='Datastore')
    parser.add_option('-s', '--scenario', dest='scenario', default=1,
                      help='1: normal; 2: VM suspended; 3: VM poweredOn',
                      type='int')
    parser.add_option('-?', '--help', '--usage', action='help',
                      help='Usage information')
    (options, args) = parser.parse_args(argv)

    # Connect
    si = SmartConnect(host=options.host, user=options.user,
                      pwd=options.pwd, port=options.port)
    atexit.register(Disconnect, si)

    vm1 = vm.CreateQuickDummy(options.vmname, vmxVersion="vmx-11",
                              datastoreName=options.datastore)
    cspec = Vim.Vm.ConfigSpec()

    numCtlrs = options.numCtlrs
    if numCtlrs > 4:
        numCtlrs = 4
    elif numCtlrs < 0:
        numCtlrs = 1
    numdisks = options.numdisks
    if numdisks < 0 or numdisks > 254:
        Log("Invalid number of disks, using 16.")
        numdisks = 16

    # Scenarios
    allScenario = {1: normalScene, 2: suspendedScene, 3: poweredOnScene}
    scene = options.scenario
    if scene not in allScenario:
        Log("Invalid scenario specified, using scenario 1.")
        scene = 1

    ctlrKeys = []
    for i in range(numCtlrs):
        cspec = vmconfig.AddScsiCtlr(cspec, ctlrType="pvscsi")
        ctlrKeys.append(1000 + i)
    vm.Reconfigure(vm1, cspec)
    Log("Created VM with PVSCSI controller")

    allScenario[scene](vm1, numdisks, ctlrKeys, options.datastore)
    vm.Destroy(vm1)
    Log("Success!")
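
# Hypothetical command line for RunTests above (the file name is assumed):
#
#   python moreDevicesTest.py -h esx-host -u root -p '<password>' \
#       -d datastore1 -c 4 -n 60 -s 2
#
# This would create four PVSCSI controllers with 60 disks each and run the
# suspended-VM scenario.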
def PoweroffSuspendedVMTests(host, vmName, datastore):
    hostSystem = GetHostSystem(host)
    Log("Poweroff Suspended VM")
    testVm = vm.CreateQuickDummy(vmName, 1, datastoreName=datastore)
    tmpName = vmName + str(1)
    testVm1 = vm.CreateQuickDummy(tmpName, 1, datastoreName=datastore)

    # Power on a VM, then suspend it.
    task = testVm.PowerOn()
    WaitForTask(task)
    task = testVm.Suspend()
    WaitForTask(task)
    # Now try powering off the VM.
    task = testVm.PowerOff()
    WaitForTask(task)
    Log("Successfully powered off Suspended VM")

    Log("Doing Maintenance mode sanity checking")
    task = hostSystem.EnterMaintenanceMode(0)
    WaitForTask(task)
    Log("Entered Maintenance mode")
    task = hostSystem.ExitMaintenanceMode(0)
    WaitForTask(task)

    task = testVm.PowerOn()
    WaitForTask(task)
    task = testVm.Suspend()
    WaitForTask(task)
    Log("Powering off a suspended VM in maintenance mode")
    task = hostSystem.EnterMaintenanceMode(0)
    WaitForTask(task)
    try:
        task = testVm.PowerOff()
        WaitForTask(task)
    except Vim.Fault.InvalidState:
        print("Received InvalidState exception")
    task = hostSystem.ExitMaintenanceMode(0)
    WaitForTask(task)

    Log("Check maintenance mode ref count test 1")
    task = testVm.PowerOn()
    WaitForTask(task)
    task = testVm.Suspend()
    WaitForTask(task)
    Log("Power on 2nd VM")
    task = testVm1.PowerOn()
    WaitForTask(task)
    hostSystem.EnterMaintenanceMode(15)
    Log("Power off 2nd VM, host should go in maintenance mode now.")
    task = testVm1.PowerOff()
    WaitForTask(task)
    time.sleep(3)
    task = hostSystem.ExitMaintenanceMode(0)
    WaitForTask(task)

    Log("Check maintenance mode ref count test 2")
    task = testVm.PowerOff()
    WaitForTask(task)
    Log("Power on 2nd VM")
    task = testVm1.PowerOn()
    WaitForTask(task)
    hostSystem.EnterMaintenanceMode(15)
    Log("Power off 2nd VM, host should go in maintenance mode now.")
    task = testVm1.PowerOff()
    WaitForTask(task)
    time.sleep(3)
    task = hostSystem.ExitMaintenanceMode(0)
    WaitForTask(task)

    # Destroy the test VMs.
    vm.Destroy(testVm)
    vm.Destroy(testVm1)
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "Replay-VM",
                      "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter")]
    supportedToggles = [(["usage", "help"], False,
                         "Show usage information", "usage")]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))

    global status
    for i in range(numiter):
        vm1 = None
        # Cleanup from previous runs
        vm1 = folder.Find(vmname)
        if vm1 is not None:
            vm1.Destroy()

        # Create new VM
        vm1 = vm.CreateQuickDummy(vmname, guest="winXPProGuest")
        Log("Using VM : " + vm1.GetName())
        extraCfgs = vm1.GetConfig().GetExtraConfig()
        AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")
        #AddExtraConfig(extraCfgs, "monitor.needreplay", "TRUE")
        cSpec = Vim.Vm.ConfigSpec()
        cSpec.SetFlags(Vim.vm.FlagInfo(recordReplayEnabled=True))
        cSpec.SetExtraConfig(extraCfgs)
        task = vm1.Reconfigure(cSpec)
        WaitForTask(task)
        try:
            Log("Powering on the VM...")
            vm.PowerOn(vm1)
            Log("Powering off the VM...")
            vm.PowerOff(vm1)

            # StartRecording on a powered-off VM
            Log("Attempt to record a powered-off VM")
            try:
                vm.StartRecording(vm1, "Recording1", "Test recording")
                status = "FAIL"
                return
            except Vim.Fault.InvalidState:
                Log("Received InvalidState exception")

            # Power on the VM
            vm.PowerOn(vm1)

            # Start recording
            Log("Starting recording...")
            task = vm1.StartRecording("Recording1", "Test recording")
            WaitForTask(task)
            snapshot = task.GetInfo().GetResult()
            if snapshot is None:
                raise Exception("Got null result from StartRecording!")
            CheckState(vm1, Vim.VirtualMachine.RecordReplayState.recording)

            # Stop recording
            time.sleep(5)
            Log("Stopping recording...")
            vm.StopRecording(vm1)
            CheckState(vm1, Vim.VirtualMachine.RecordReplayState.inactive)

            # Check if the VM's snapshot is replayable
            snapInfo = vm1.GetSnapshot()
            rootSnapshotList = snapInfo.GetRootSnapshotList()
            rootSnapshot = rootSnapshotList[0]
            if rootSnapshot.GetReplaySupported() == False:
                raise Exception("Recorded Snapshot does not support replay!")
            Log("Using recorded snapshot " + rootSnapshot.GetName())

            # Start replay
            Log("Initiating replay...")
            vm.StartReplaying(vm1, rootSnapshot.GetSnapshot())
            CheckState(vm1, Vim.VirtualMachine.RecordReplayState.replaying)
            time.sleep(1)

            # Stop replay
            Log("Stopping replay...")
            vm.StopReplaying(vm1)
            CheckState(vm1, Vim.VirtualMachine.RecordReplayState.inactive)

            # Replay an invalid snapshot
            Log("Creating a dummy snapshot for replay")
            vm.CreateSnapshot(vm1, "dummySnapshot", "Dummy Snapshot",
                              False, False)
            snapInfo = vm1.GetSnapshot()
            curSnap = snapInfo.GetCurrentSnapshot()
            Log("Attempt to replay dummy snapshot...")
            try:
                vm.StartReplaying(vm1, curSnap)
            except Exception:
                Log("Verified that attempt to replay invalid snapshot "
                    "was rejected.")
            CheckState(vm1, Vim.VirtualMachine.RecordReplayState.inactive)
            Log("Powering off...")
            vm.PowerOff(vm1)

            # PR 773236, recordReplayEnabled=False means StartRecording
            # should be rejected.
            spec = Vim.vm.ConfigSpec(flags=Vim.vm.FlagInfo(
                recordReplayEnabled=False))
            WaitForTask(vm1.Reconfigure(spec))
            vm.PowerOn(vm1)
            try:
                WaitForTask(vm1.StartRecording("Recording2", "Test recording"))
            except Vim.Fault.RecordReplayDisabled as e:
                Log("Verified that attempt to start recording when disabled "
                    "was rejected.")
                Log("%s" % e)
            else:
                vm.StopRecording(vm1)
                vm.PowerOff(vm1)
                status = "FAIL"
                Log("StartRecording was allowed")
                return
            vm.PowerOff(vm1)
        except Exception:
            Log(traceback.format_exc())
            status = "FAIL"
            return

        try:
            if vm1.GetRuntime().GetPowerState() == \
                    Vim.VirtualMachine.PowerState.poweredOn:
                Log("Powering off VM...")
                vm.PowerOff(vm1)
            Log("Deleting VM")
            vm.Destroy(vm1)
        except Exception as e:
            Log("Error deleting VM : " + str(e))
        if status == "FAIL":
            break
    Log("Test status : " + str(status))
    return
def test(si, delta, backingType, vmxVersion, ds):
    suffix = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(8))
    vm1Name = '-'.join(['LinkedParent', suffix])
    print('Creating %s VM on %s' % (vm1Name, ds))
    task.WaitForTasks(
        [vm1.Destroy() for vm1 in folder.GetVmAll() if vm1.name == vm1Name])
    vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=1, datastoreName=ds,
                              diskSizeInMB=1, vmxVersion=vmxVersion,
                              backingType=backingType)
    vm1DirName = vm1.config.files.snapshotDirectory
    print('Creating Snapshot S1 for %s' % vm1Name)
    vm.CreateSnapshot(vm1, 'S1', '', False, False)
    s1 = vm1.snapshot.currentSnapshot

    disks = vmconfig.CheckDevice(s1.config, vim.vm.Device.VirtualDisk)
    if len(disks) != 1:
        raise Exception('Failed to find parent disk from snapshot')
    parent = disks[0].backing

    vm2Name = '-'.join(['LinkedChild', suffix])
    print('Creating %s VM on %s' % (vm2Name, ds))
    task.WaitForTasks(
        [vm2.Destroy() for vm2 in folder.GetVmAll() if vm2.name == vm2Name])
    vm2 = vm.CreateQuickDummy(vm2Name, datastoreName=ds,
                              vmxVersion=vmxVersion)
    vm2DirName = vm2.config.files.snapshotDirectory

    configSpec = vim.vm.ConfigSpec()
    configSpec = vmconfig.AddScsiCtlr(configSpec)
    configSpec = vmconfig.AddScsiDisk(configSpec, datastorename=ds,
                                      capacity=1024, backingType=backingType)
    child = configSpec.deviceChange[1].device.backing
    child.parent = parent
    child.deltaDiskFormat = delta

    # This edit is expected to fail.
    configSpec = vmconfig.AddFloppy(
        configSpec, type="image",
        backingName="[] /these/are/not/the/floppy/images/you/are/looking/for.flp")
    floppy = configSpec.deviceChange[2].device
    floppy.backing = None

    print('Reconfigure %s (1) adding a disk backed by snapshot of %s and (2) '
          'adding floppy backed by non-existent image. Expecting a failure' %
          (vm2Name, vm1Name))
    try:
        vm.Reconfigure(vm2, configSpec)
    except Exception:
        pass
    else:
        raise Exception(
            'Expected exception during %s reconfigure. But it succeeded instead'
            % vm2Name)

    print('Destroying %s' % vm2Name)
    vm.Destroy(vm2)
    print('Destroying %s' % vm1Name)
    vm.Destroy(vm1)

    hostSystem = host.GetHostSystem(si)
    datastoreBrowser = hostSystem.GetDatastoreBrowser()
    try:
        task.WaitForTask(datastoreBrowser.Search(vm1DirName))
    except vim.fault.FileNotFound:
        pass
    else:
        raise Exception("Expected that '%s' will be gone but it is still "
                        "present" % vm1DirName)
    try:
        task.WaitForTask(datastoreBrowser.Search(vm2DirName))
    except vim.fault.FileNotFound:
        pass
    else:
        raise Exception("Expected that '%s' will be gone but it is still "
                        "present" % vm2DirName)
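
# Hypothetical driver for the linked-clone failure test above. `si` is
# assumed to be an authenticated service instance; the delta formats are the
# stock VirtualDiskDeltaDiskFormat enum values, paired here with the backing
# types this file already exercises.
def RunLinkedCloneTests(si, ds):
    for backingType, delta in [("flat", "redoLogFormat"),
                               ("seSparse", "seSparseFormat")]:
        test(si, delta, backingType, vmxVersion="vmx-08", ds=ds)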
def CleanupVm(vmname):
    vm1 = folder.Find(vmname)
    if vm1 is not None:
        Log("Cleaning up VM " + vmname)
        vm.Destroy(vm1)
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "Hw7ReconfigTest",
                      "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter")]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            ## Cleanup old VMs
            posVmName = vmname + "_Pos_" + str(i)
            negVmName = vmname + "_Neg_" + str(i)
            CleanupVm(posVmName)
            CleanupVm(negVmName)

            ## Positive tests on a hwVersion 7 VM
            Log("Creating Hw7 VM..")
            vm1 = vm.CreateQuickDummy(posVmName, 0, 1, vmxVersion="vmx-07")
            # Test add & removal of a Vmxnet3 device
            TestAddRemoveVmxet3Device(vm1)
            # Test add & removal of an LsiLogicSAS controller
            TestAddRemoveLsiLogicSasDevice(vm1)
            # Test toggling VAsserts in the VM
            TestVAssertToggle(vm1, True)
            TestVAssertToggle(vm1, False)
            # Test VMCI device
            TestPosVMCIDevice(vm1)
            TestNegVMCIDevice(vm1, True)
            # Test PCI passthrough device
            TestPCIPassthroughDevice(vm1, True)

            ## Negative tests on a hwVersion 4 VM
            Log("Creating Hw4 VM..")
            vm2 = vm.CreateQuickDummy(negVmName, 1)
            # Test add & removal of a Vmxnet3 device
            TestAddRemoveVmxet3Device(vm2, False)
            # Test add & removal of an LsiLogicSAS controller
            TestAddRemoveLsiLogicSasDevice(vm2, False)
            # Test if a VMCI device is present by default
            TestNegVMCIDevice(vm2, False)
            # Test that adds of PCI passthrough devices are disallowed
            TestPCIPassthroughDevice(vm2, False)

            Log("Destroying VMs")
            vm.Destroy(vm1)
            vm.Destroy(vm2)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def TestSimulatedVcClone(vmName, uuid):
    """ Test the code paths that VC exercises while cloning a VM with a
    dvs backing. """
    print("Testing hostd code corresponding to clone")
    cleanupvm(vmName)
    envBrowser = invt.GetEnv()
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)

    # Add a nic backed by an invalid dvs portgroup pair.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption,
                                       "invalidPg")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config,
                               invt.GetResourcePool(), None)
    except Vim.Fault.InvalidDeviceSpec:
        print("Test1: Caught invalid device spec as expected")
    else:
        raise Exception("Test1: Create vm with invalid dvPortgroup backing "
                        "didn't fail as expected")
    print("Test1: Create vm with invalid dvPortgroup backing failed as "
          "expected: PASS")

    config = vm.CreateQuickDummySpec(vmName)
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg1")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config,
                               invt.GetResourcePool(), None)
    except Exception:
        print("Failed to clone a VM to connect to a dvPortgroup")
        raise
    print("Test2: Create vm with valid dvPort backing: PASS")

    # Create a VM only specifying the dvs uuid in its backing.
    vm1 = folder.Find(vmName)
    vm.Destroy(vm1)
    config = vm.CreateQuickDummySpec(vmName)
    config = vmconfig.AddDvPortBacking(config, "", uuid, None, cfgOption, "")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config,
                               invt.GetResourcePool(), None)
    except Exception:
        print("Failed to clone a VM connected to a standalone port")
        raise
    myVm = folder.Find(vmName)
    devices = vmconfig.CheckDevice(myVm.GetConfig(),
                                   Vim.Vm.Device.VirtualEthernetCard)
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception("Nic has a dvPort assigned to it or nic add failed")
    print("Test3: Create vm with valid dvs uuid specified in the dvs "
          "backing (standalone): PASS")

    # Reconfigure a VM only specifying a dvs uuid in its backing.
    dvPortBacking = \
        Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo
    for device in devices:
        if isinstance(device.GetBacking(), dvPortBacking):
            cspec = Vim.Vm.ConfigSpec()
            device.GetConnectable().SetConnected(True)
            device.SetUnitNumber(9)
            vmconfig.AddDeviceToSpec(
                cspec, device,
                Vim.Vm.Device.VirtualDeviceSpec.Operation.add)
            break
    try:
        task = myVm.Reconfigure(cspec)
        WaitForTask(task)
    except Exception:
        print("Test4: failed to add a device with only dvs backing specified")
        raise
    print("Test4: Reconfig VM specifying only the dvsUuid in backing: PASS")
    print("Testing simulated vc clone done")
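
# Hypothetical invocation, assuming the DVS created during setup exposes its
# uuid and that a portgroup "pg1" exists on it (as Test2 requires):
#
#   TestSimulatedVcClone("dvsCloneTestVm", dvsUuid)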