def UnmountVolumes():
    """Unmount every VMFS volume listed in the module-level volumeUUIDs,
    timing the bulk unmount task."""
    timer = StopWatch()
    WaitForTask(storageSystem.UnmountVmfsVolumeEx(volumeUUIDs))
    timer.finish(" unmount VMFS volumes")  # TODO add str(len(volumeUUIDs))
    # Rest a bit, in case un-expected things happen before the next op.
    time.sleep(60)
def CreateDatastores(availableDisks):
    """Create one VMFS datastore per available disk.

    Each disk's partition is shrunk by a random integer factor (2..6) so
    datastores of varying sizes get produced. Creation errors are logged
    and skipped, not raised.

    availableDisks: host disk objects exposing deviceName / devicePath.
    """
    count = 0
    sw = StopWatch()
    for availableDisk in availableDisks:
        createOptions = datastoreSystem.QueryVmfsDatastoreCreateOptions(
            availableDisk.deviceName)
        for createOption in createOptions:
            if type(createOption.info) == Vim.Host.VmfsDatastoreOption.AllExtentInfo:
                count += 1
                # BUG FIX: shrink by an *integer* factor. Plain '/=' makes the
                # block count a float on Python 3, which the partition spec
                # does not accept; '//=' preserves integral block counts.
                createOption.info.layout.partition[0].end.block //= \
                    random.choice([2, 3, 4, 5, 6])
                diskPartInfo = storageSystem.ComputeDiskPartitionInfo(
                    availableDisk.devicePath, createOption.info.layout)
                # Assign DS name and set the adjusted partition size.
                createOption.spec.vmfs.volumeName = "ds_%04d" % count
                print("Creating datastore %s" % createOption.spec.vmfs.volumeName)
                createOption.spec.partition = diskPartInfo.spec
                bt = time.time()
                try:
                    ds = datastoreSystem.CreateVmfsDatastore(createOption.spec)
                except Exception as e:
                    print("Unable to create Datastore. Error: %s " % e)
                print("create one datastore took " + str(time.time() - bt) + " sec.")
    sw.finish(" create all datastores")
def QueryUnresolvedVmfsVolume():
    """Refresh the module-level 'vols' list (in place) with the host's
    unresolved VMFS volumes, timing the API call."""
    if debug:
        print(" Calling QueryUnresolvedVmfsVolume() API")
    timer = StopWatch()
    # Slice-assign so other references to 'vols' see the update.
    vols[:] = storageSystem.QueryUnresolvedVmfsVolume()
    timer.finish(" query unresolved Vmfs Volumes")
    print("Found " + str(len(vols)) + " number of unresolved volumes.")
def main():
    """Parse command-line options, connect to the host, and run the requested
    test(s) from allTests for the given number of iterations."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): the "user" tuple was corrupted by secret scrubbing
        # ('"user="******"root"'); restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
        (["t:", "test="], "", "Specific test to run", "test"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    allTests = {'firmware': mainTestFirmware, 'multicore': mainTestMulticore}
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        print("\nTests available:")
        for k in allTests.keys():
            print("\t" + k)
        sys.exit(0)

    # Connect; always disconnect on interpreter exit.
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line.
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    test = args.GetKeyValue("test")

    status = "PASS"
    for i in range(numiter):
        bigClock = StopWatch()
        if test == "":
            # BUG FIX: the original kept only the LAST test's status, so an
            # earlier failing test could not flip the overall result.
            for k in allTests.keys():
                if allTests[k]() == "FAIL":
                    status = "FAIL"
        elif test in allTests:
            if allTests[test]() == "FAIL":
                status = "FAIL"
        else:
            Log("Test '" + test + "' not found. Check usage for list of tests.")
        Log("Tests completed.")
        bigClock.finish("iteration " + str(i))
    Log("TEST RUN COMPLETE: " + status)
def RemoveDatastoresEx():
    """Remove all datastores collected in the module-level vmfsDs list using
    the bulk RemoveDatastoreEx API, timing the whole operation."""
    if not vmfsDs:
        print("no interested datastore found!")
        return
    timer = StopWatch()
    # API ref: vim.host.DatastoreSystem.RemoveDatastoreEx
    WaitForTask(datastoreSystem.RemoveDatastoreEx(vmfsDs))
    timer.finish(" remove VMFS datastoreEX")
class Phase:
    """Tracks numbered test phases, logging and timing each one."""

    def __init__(self):
        # Phase counter and a stopwatch restarted at every SetPhase call.
        self.phaseNumber = 0
        self.phaseClock = StopWatch()

    def SetPhase(self, msg):
        """Log completion of the current phase, record its duration, and
        begin timing the next phase."""
        Log("Phase " + str(self.phaseNumber) + ": " + msg + " completed")
        self.phaseClock.finish("phase " + str(self.phaseNumber))
        self.phaseClock = StopWatch()
        self.phaseNumber += 1
def RescanVmfs():
    """Rescan the host for new VMFS volumes and report the call latency.

    RescanVmfs() blocks and (on this host) yields no usable task object, so
    the call itself is timed directly instead of waiting on a task.
    """
    timer = StopWatch()
    storageSystem.RescanVmfs()
    timer.finish("rescan VMFS")
    # Give the host time to settle before the next operation.
    time.sleep(60)
def main(): """ Simple command-line program for creating virtual machines on a system managed by hostd. """ options = GetOptions() Connect(host=options.host, user=options.user, pwd=options.password) # Create vms envBrowser = GetEnv() cfgOption = envBrowser.QueryConfigOption(None, None) cfgTarget = envBrowser.QueryConfigTarget(None) for i in range(int(options.num_iterations)): vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i), options.num_scsi_disks, options.num_ide_disks, datastoreName=options.datastore_name, cfgOption=cfgOption, cfgTarget=cfgTarget) for _ in range(int(options.num_power_cycles)): clock = StopWatch() vm.PowerOn(vm1) clock.finish("PowerOn done") clock = StopWatch() vm.PowerOff(vm1) clock.finish("PowerOff done") # Delete the vm as cleanup if not options.dont_delete: task = vm1.Destroy() WaitForTask(task)
def main():
    """Register numiter pre-existing VMX files from datastore 'storage1',
    timing each Register call; with --unregister, unregister every VM."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "CreateTest", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["unregister"], False, "Do an unregister of all registered vms",
         "unregister"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"))

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    dounreg = args.GetKeyValue("unregister")  # toggle value; truthy check below

    # The unregister case: drop every registered VM and exit.
    if dounreg:
        for v in folder.GetAll():
            v.Unregister()
        sys.exit(0)

    # Register pre-existing VMX files, one per iteration, timing each call.
    for i in range(numiter):
        clock = StopWatch()
        folder.Register("[storage1] " + vmname + "_" + str(i) + "/" +
                        vmname + "_" + str(i) + ".vmx")
        clock.finish("Register done")
def mainTestFirmware():
    # Verify firmware reconfiguration (efi <-> bios) on a Mac OS guest VM.
    # Returns "PASS" or "FAIL"; exceptions are caught and logged, not raised.
    Log("---[ TEST " + testName + " ]---")
    vmname = "HwV8_Firmware"
    status = "PASS"
    bigClock = StopWatch()
    vm1 = None
    try:
        macosVmName = vmname + "_MacOS"
        Log("Cleaning up VMs from previous runs...")
        vm.Delete(macosVmName, True)
        Log("Creating Mac OS VM..")
        vm1 = vm.CreateQuickDummy(macosVmName,
                                  vmxVersion="vmx-08",
                                  memory=4,
                                  guest="darwin11Guest")
        # Switch to EFI and verify the VM config reflects it.
        firmware = "efi"
        ChangeFirmware(vm1, firmware, True)
        if firmware != vm1.config.firmware:
            raise Exception("Firmware don't match set value")
        # Switch back to BIOS and verify again.
        firmware = "bios"
        ChangeFirmware(vm1, firmware, True)
        if firmware != vm1.config.firmware:
            raise Exception("Firmware don't match set value")
        Log("Deleting VM " + macosVmName)
        vm.Delete(macosVmName, True)
        bigClock.finish(testName)
    except Exception as e:
        status = "FAIL"
        Log("Caught exception : " + str(e))
    Log("TEST [" + testName + "] COMPLETE: " + status)
    return status
def RemoveDatastores():
    # Remove each datastore in the module-level vmfsDs list one at a time,
    # timing both individual removals and the whole pass.
    sw = StopWatch()
    for ds in vmfsDs:
        dsname = ds.info.name
        # easy to fail, try multiple times, most QE try 12 times
        # (PR1457852, PR1422995)
        retry = 0
        # Retry loop currently capped at a single attempt; bump the bound
        # below to retry more (comment says 12 is known to be used by QE).
        while retry < 1:  # can try 12 if needed
            bt = time.time()
            try:
                datastoreSystem.RemoveDatastore(ds)
            except Exception as obj:
                print("try %d Failed to delete datastore: %s : Error Msg: %s"
                      % (retry, ds, obj))
            else:
                # Success: report the per-datastore latency and stop retrying.
                print("remove " + dsname + " took " + str(time.time() - bt) +
                      " sec.")
                break
            retry += 1
    # note: calculate individual latency may affect the overall latency
    sw.finish(" removing all datastores")
def main(): """ Simple command-line program for creating virtual machines on a system managed by hostd. """ options = GetOptions() Connect(host=options.host, user=options.user, namespace=newestVersions.GetNamespace('vim'), pwd=options.password) # Create vms envBrowser = GetEnv() cfgOption = envBrowser.QueryConfigOption(None, None) cfgTarget = envBrowser.QueryConfigTarget(None) for i in range(int(options.num_iterations)): vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i), options.num_scsi_disks, options.num_ide_disks, datastoreName=options.datastore_name, cfgOption=cfgOption, cfgTarget=cfgTarget) if options.opaquenetwork_id: config = Vim.Vm.ConfigSpec() config = vmconfig.AddOpaqueNetwork(config, cfgOption, opaqueNetworkId=options.opaquenetwork_id, \ opaqueNetworkType=options.opaquenetwork_type, \ externalId=options.externalID) vm.Reconfigure(vm1, config) for _ in range(int(options.num_power_cycles)): clock = StopWatch() vm.PowerOn(vm1) clock.finish("PowerOn done") clock = StopWatch() vm.PowerOff(vm1) clock.finish("PowerOff done") # Delete the vm as cleanup if not options.dont_delete: task = vm1.Destroy() WaitForTask(task)
def TestDVSLimits(si, uuid, dvsName):
    # Stress-test DVS portgroup limits: create a DVS, bulk-add 512 static
    # (earlyBinding) portgroups, clean up, then repeat with 512 ephemeral
    # portgroups. Each bulk UpdateDVPortgroups call is timed.
    #
    # si:      connected service instance
    # uuid:    uuid to assign to the new DVS
    # dvsName: display name for the DVS

    # Create the dvs.
    prodSpec = Vim.Dvs.ProductSpec(vendor="VMware", version="6.5.0")
    dvsManager = si.RetrieveInternalContent().hostDistributedVirtualSwitchManager
    createSpec = Vim.Dvs.HostDistributedVirtualSwitchManager.DVSCreateSpec(
        uuid=uuid,
        name=dvsName,
        backing=Vim.Dvs.HostMember.PnicBacking(),
        productSpec=prodSpec,
        maxProxySwitchPorts=64,
        modifyVendorSpecificDvsConfig=True,
        modifyVendorSpecificHostMemberConfig=True)
    vmwsetting = Vim.Dvs.HostDistributedVirtualSwitchManager.VmwareDVSSettingSpec()
    createSpec.SetVmwareSetting(vmwsetting)
    dvsManager.CreateDistributedVirtualSwitch(createSpec)

    portgroupList = []
    numIter = 512
    print("testing early binding portgroups limits")
    for i in range(numIter):
        name = "pg" + str(i)
        pg = GeneratePortgroupCfg(name, "add", "earlyBinding")
        portgroupList.append(pg)
    bigClock = StopWatch()
    dvsManager.UpdateDVPortgroups(uuid, portgroupList)
    bigClock.finish("creating " + str(numIter) + " static pgs")
    # 'name' here is the last generated portgroup name ("pg511").
    ValidateEarlyBindingPgState(name)
    cleanup(si, uuid, "")

    print("testing ephemeral binding portgroups limits")
    # Recreate the DVS (cleanup above removed it) and repeat with ephemeral.
    dvsManager.CreateDistributedVirtualSwitch(createSpec)
    portgroupList = []
    j = 0
    for j in range(numIter):
        name = "pg" + str(j)
        pg = GeneratePortgroupCfg(name, "add", "ephemeral")
        portgroupList.append(pg)
    bigClock = StopWatch()
    dvsManager.UpdateDVPortgroups(uuid, portgroupList)
    bigClock.finish("creating " + str(numIter) + " ephemeral pgs")
    ValidateEphemeralPgState(name)
    cleanup(si, uuid, "")
def main(): """ Simple command-line program for creating virtual machines on a system managed by hostd. """ options = GetOptions() curSi = SmartConnect(host=options.host, user=options.user, pwd=options.password) # Create vms envBrowser = GetEnv() cfgOption = envBrowser.QueryConfigOption(None, None) cfgTarget = envBrowser.QueryConfigTarget(None) vmList = [] tasks = [] clock = StopWatch() for i in range(int(options.num_iterations)): vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i), options.num_scsi_disks, options.num_ide_disks, datastoreName=options.datastore_name, cfgOption=cfgOption, cfgTarget=cfgTarget) vmList.append(vm1) if options.opaquenetwork_id: config = Vim.Vm.ConfigSpec() config = vmconfig.AddOpaqueNetwork(config, cfgOption, opaqueNetworkId=options.opaquenetwork_id, \ opaqueNetworkType=options.opaquenetwork_type) task = vm1.Reconfigure(config) tasks.append(task) WaitForTasks(tasks) clock.finish("Reconfigure VMs done") # Delete the vm as cleanup if not options.dont_delete: clock = StopWatch() WaitForTasks([curVm.Destroy() for curVm in vmList]) clock.finish("Destroy VMs done")
def RescanHBA():
    """Rescan a single (hard-coded) host bus adapter and time the call."""
    timer = StopWatch()
    # TODO: need pass in an adapater,let me hardcode first.
    storageSystem.RescanHba("vmhba2")
    timer.finish("rescan ")
def MountVolumes():
    """Mount every VMFS volume listed in the module-level volumeUUIDs,
    timing the bulk mount task."""
    timer = StopWatch()
    WaitForTask(storageSystem.MountVmfsVolumeEx(volumeUUIDs))
    timer.finish(" mount VMFS volumes")
    # Give the host a moment to settle before the next operation.
    time.sleep(60)
def AttachLuns():
    """Attach every SCSI LUN listed in the module-level lunUUIDs, timing
    the bulk attach task."""
    timer = StopWatch()
    WaitForTask(storageSystem.AttachScsiLunEx(lunUUIDs))
    timer.finish(" attach LUNs")
    # tried 180 still not enough
    time.sleep(360)
def DetachLuns():
    """Detach every SCSI LUN listed in the module-level lunUUIDs, timing
    the bulk detach task."""
    timer = StopWatch()
    WaitForTask(storageSystem.DetachScsiLunEx(lunUUIDs))
    timer.finish(" detach LUNs")
    # Settle time before the next operation.
    time.sleep(60)
def main():
    """Serial-port hot-add test: create a dummy VM and add serial ports with
    device, file, pipe and URI backings, for numiter iterations."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "SerialPortTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    # BUG FIX: 'xrange' does not exist on Python 3; the rest of this file
    # already uses range().
    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            Log("Cleaning up VMs from previous runs...")
            vm.Delete(vmname, True)
            Log("Creating VM..")
            vm1 = vm.CreateQuickDummy(vmname)
            Log("Adding serial port with device backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.DeviceBackingInfo)
            Log("Adding serial port with file backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.FileBackingInfo)
            Log("Adding serial port with pipe backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.PipeBackingInfo)
            Log("Adding serial port with URI backing")
            TestAddSerial(vm1, Vim.Vm.Device.VirtualSerialPort.URIBackingInfo)
            Log("Deleting VM..")
            vm.Delete(vmname, True)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def main():
    """Device hot-plug driver: run positive hot-plug tests against an
    existing VM named by --vmname, or (if not found) against freshly
    created Hw8/Hw7 dummies plus negative tests on a Hw4 dummy."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "HotPlugTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = folder.Find(vmname)
        try:
            if vm1:
                # A named VM exists: power it on if needed and run the
                # positive hot-plug tests directly against it.
                Log("Powering on VM " + vm1.GetConfig().GetName())
                if vm1.GetRuntime().GetPowerState() == \
                        Vim.VirtualMachine.PowerState.poweredOff:
                    vm.PowerOn(vm1)
                TestDeviceHotPlugForVm(vm1, True)
            else:
                # NOTE(review): this log message was split by a stray hard
                # newline in the checked-in source; rejoined into one literal.
                Log("Did not specify a vmname or the VM was not found. "
                    "Using the default name HotPlugTest")
                posVmName = vmname + "_Pos_" + str(i)
                negVmName = vmname + "_Neg_" + str(i)
                Log("Cleaning up VMs from previous runs...")
                vm.Delete(posVmName, True)
                vm.Delete(negVmName, True)

                ## Positive tests on a hwVersion 8 VM
                Log("Creating Hw8 VM..")
                vm1 = vm.CreateQuickDummy(posVmName, vmxVersion="vmx-08",
                                          memory="1024", guest="rhel5Guest")
                Log("Powering on VM " + vm1.GetConfig().GetName())
                vm.PowerOn(vm1)
                # Positive tests for hw8 VM
                TestDeviceHotPlugForVm(vm1, True)
                Log("Powering off and deleting VM " + vm1.GetName())
                vm.Delete(posVmName, True)

                ## Positive tests on a hwVersion 7 VM
                Log("Creating Hw7 VM..")
                vm1 = vm.CreateQuickDummy(posVmName, vmxVersion="vmx-07",
                                          memory="1024", guest="rhel5Guest")
                Log("Powering on VM " + vm1.GetConfig().GetName())
                vm.PowerOn(vm1)
                # Positive tests for hw7 VM
                TestDeviceHotPlugForVm(vm1, True)
                Log("Powering off and deleting VM " + vm1.GetName())
                vm.Delete(posVmName, True)

                ## Negative tests on a hwVersion 4 VM
                Log("Creating Hw4 VM..")
                vm2 = vm.CreateQuickDummy(negVmName, 1, vmxVersion="vmx-04")
                Log("Powering on VM " + negVmName)
                vm.PowerOn(vm2)
                # Negative tests for hw4 VM
                TestDeviceHotPlugForVm(vm2, False)
                Log("Powering off and deleting VM " + vm2.GetName())
                vm.Delete(negVmName, True)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))

    # testFailedCount is a module-level counter bumped by the plug tests.
    if testFailedCount == 0:
        Log("TEST RUN COMPLETE: " + status)
    else:
        Log("TEST RUN COMPLETE: FAIL")
        Log("Number of total tests failed : " + str(testFailedCount))
def testPromoteDisks(si, numDisks, numiter, backingType, vmxVersion,
                     ds1, ds2, status, resultsArray):
    """Exercise LLPM PromoteDisks on a linked-clone child VM.

    Per iteration: create a parent VM with numDisks disks on ds1, snapshot
    it, build a linked-clone child on ds2 whose disks are delta disks off
    the snapshot's disks, power both on, then call
    LowLevelProvisioningManager.PromoteDisks on the child's disks.

    Appends "PASS"/"FAIL" to resultsArray per iteration and logs a summary.
    """
    for i in range(numiter):
        bigClock = StopWatch()
        try:
            try:
                vm1Name = "Parent" + str(i)
                vm1 = folder.Find(vm1Name)
                if vm1 is not None:
                    Log("Cleaning up old vm with name: " + vm1Name)
                    vm1.Destroy()
                # Create a simple vm with numDisks on ds1
                vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=numDisks,
                                          datastoreName=ds1, diskSizeInMB=1,
                                          vmxVersion=vmxVersion,
                                          backingType=backingType)
                Log("Created parent VM1 --" + vm1Name)
                vm1DirName = vm1.config.files.snapshotDirectory

                # Create snapshot
                vm.CreateSnapshot(vm1, "S1", "S1 is the first snaphost",
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Created Snapshot S1 for VM1")

                # Get the name of the parent disks.
                disks = vmconfig.CheckDevice(S1Snapshot.GetConfig(),
                                             Vim.Vm.Device.VirtualDisk)
                if len(disks) != numDisks:
                    raise Exception("Failed to find parent disks")
                # BUG FIX: the original inner loops reused 'i', clobbering
                # the iteration index used in names and timing labels.
                parentDisks = [disk.GetBacking().GetFileName()
                               for disk in disks]

                # Create a VM2 on ds2 that is linked off S1.
                vm2Name = "LinkedClone" + str(i)
                configSpec = vmconfig.CreateDefaultSpec(name=vm2Name,
                                                        datastoreName=ds2)
                configSpec = vmconfig.AddScsiCtlr(configSpec)
                configSpec = vmconfig.AddScsiDisk(configSpec,
                                                  datastorename=ds2,
                                                  capacity=1024,
                                                  backingType=backingType)
                configSpec.SetVersion(vmxVersion)
                childDiskBacking = \
                    configSpec.GetDeviceChange()[1].GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisks[0])
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat("redoLogFormat")
                resPool = invt.GetResourcePool()
                vmFolder = invt.GetVmFolder()
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
                vm2 = folder.Find(vm2Name)
                Log("Created child VM2 --" + vm2Name)
                vm2DirName = vm2.config.files.snapshotDirectory

                # create delta disks off VM1 on VM2
                Log("Adding delta disks off VM1 to VM2")
                configSpec = Vim.Vm.ConfigSpec()
                for diskIdx in range(len(parentDisks)):
                    configSpec = vmconfig.AddScsiDisk(configSpec,
                                                      datastorename=ds2,
                                                      cfgInfo=vm2.GetConfig(),
                                                      backingType=backingType)
                    SetDeltaDiskBacking(configSpec, diskIdx,
                                        parentDisks[diskIdx])
                vimutil.InvokeAndTrack(vm2.Reconfigure, configSpec)

                Log("Power (on) vm1")
                vm.PowerOn(vm1)
                time.sleep(5)
                Log("Power (on) vm2")
                vm.PowerOn(vm2)
                time.sleep(5)

                # prepare promoteDisksSpec
                diskList = GetVirtualDisks(vm2)
                promoteDisksSpec = []
                for disk in diskList:
                    spec = vim.host.LowLevelProvisioningManager.\
                        PromoteDisksSpec()
                    spec.SetNumLinks(1)
                    spec.SetOffsetFromBottom(0)
                    spec.SetDiskId(disk.GetKey())
                    promoteDisksSpec.append(spec)

                Log("Calling LLPM PromoteDisks")
                llpm = invt.GetLLPM()
                try:
                    task = llpm.PromoteDisks(vm2, promoteDisksSpec)
                    WaitForTask(task)
                except Exception as e:
                    print(e)
                    Log("Caught exception : " + str(e))
                    status = "FAIL"
                else:
                    # BUG FIX: the original set status = "PASS"
                    # unconditionally here, erasing a FAIL recorded just
                    # above when PromoteDisks raised.
                    status = "PASS"

                Log("Destroying VMs")
                vm.PowerOff(vm2)
                time.sleep(5)
                vm.PowerOff(vm1)
                time.sleep(5)
                vm2.Destroy()
                vm1.Destroy()
            finally:
                bigClock.finish("iteration " + str(i))
        except Exception as e:
            Log("Caught exception : " + str(e))
            status = "FAIL"
        Log("TEST RUN COMPLETE: " + status)
        resultsArray.append(status)
    Log("Results for each iteration: ")
    for idx in range(len(resultsArray)):
        Log("Iteration " + str(idx) + ": " + resultsArray[idx])
def main():
    """Disk-scrub reconfigure test: create a VM with a 4 GB disk, mark all
    its disks eagerly-scrubbed via Reconfigure, then build (but do not
    apply) a spec adding another scrubbed disk."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "Hw7ReconfigTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            ## Cleanup old VMs
            vm1 = folder.Find(vmname)
            if vm1 is not None:
                vm1.Destroy()

            Log("Creating virtual machine")
            vm1 = vm.CreateQuickDummy(vmname, 1, diskSizeInMB=4096)
            devices = vmconfig.CheckDevice(vm1.GetConfig(),
                                           Vim.Vm.Device.VirtualDisk)
            if len(devices) < 1:
                raise Exception("Failed to find added disk!")

            # Flag every existing disk for eager scrubbing and edit it in
            # a single reconfigure spec.
            cspec = Vim.Vm.ConfigSpec()
            for disk in devices:
                backing = disk.GetBacking()
                backing.SetEagerlyScrub(True)
                disk.SetBacking(backing)
                vmconfig.AddDeviceToSpec(
                    cspec, disk,
                    Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            Log("Scrubbing existing disks of the VM")
            task = vm1.Reconfigure(cspec)
            WaitForTask(task)

            Log("Add a new scrubbed disk to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddScsiCtlr(cspec)
            vmconfig.AddScsiDisk(cspec, capacity=128 * 1024, scrub=True)
            # NOTE(review): the reconfigure below was already commented out
            # in the original — the spec is built but never applied.
            #task = vm1.Reconfigure(cspec)
            #WaitForTask(task)
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def main():
    """USB passthrough hot-plug test: pick a host USB device, then exercise
    plug/unplug against a USB controller on a Hw7 VM and against both xHCI
    and USB controllers on a Hw8 VM."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "HotPlugTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    # Find a USB device on the host to passthrough
    envBrowser = invt.GetEnv()
    cfgTarget = envBrowser.QueryConfigTarget(None)
    if len(cfgTarget.usb) < 1:
        Log("No USB devices available for passthrough on " +
            args.GetKeyValue("host"))
        return
    device = cfgTarget.usb[0]

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            vmname7 = vmname + "_HwV7"
            vmname8 = vmname + "_HwV8"
            Log("Cleaning up VMs from previous runs...")
            vm.Delete(vmname7, True)
            vm.Delete(vmname8, True)

            ## Positive tests on a hwVersion 7 VM
            Log("Creating Hw7 VM..")
            vm1 = vm.CreateQuickDummy(vmname7, vmxVersion="vmx-07",
                                      memory=4, guest="rhel5Guest")
            Log("Add a new USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBCtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            DoPlugTests(vm1, device, Vim.Vm.Device.VirtualUSBController, True)
            vm.Delete(vm1.name, True)

            ## Positive tests on a hwVersion 8 VM
            Log("Creating Hw8 VM..")
            vm1 = vm.CreateQuickDummy(vmname8, vmxVersion="vmx-08",
                                      memory=4, guest="rhel5Guest")
            Log("Add a new xHCI USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBXHCICtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            xhciCtlr = CheckDevice(vm1,
                                   Vim.Vm.Device.VirtualUSBXHCIController,
                                   "xHCI controller")
            DoPlugTests(vm1, device,
                        Vim.Vm.Device.VirtualUSBXHCIController, True)
            Log("Add a new USB controller to the VM")
            cspec = Vim.Vm.ConfigSpec()
            cspec = vmconfig.AddUSBCtlr(cspec)
            vm.Reconfigure(vm1, cspec)
            usbCtlr = CheckDevice(vm1, Vim.Vm.Device.VirtualUSBController,
                                  "USB controller")
            DoPlugTests(vm1, device, Vim.Vm.Device.VirtualUSBController, True)
            Log("Remove xHCI USB controller from the VM")
            cspec = vmconfig.RemoveDeviceFromSpec(Vim.Vm.ConfigSpec(),
                                                  xhciCtlr)
            vm.Reconfigure(vm1, cspec)
            CheckNoDevice(vm1, Vim.Vm.Device.VirtualUSBXHCIController,
                          "xHCI controller")
            Log("Remove USB controller from the VM")
            cspec = vmconfig.RemoveDeviceFromSpec(Vim.Vm.ConfigSpec(),
                                                  usbCtlr)
            vm.Reconfigure(vm1, cspec)
            CheckNoDevice(vm1, Vim.Vm.Device.VirtualUSBController,
                          "USB controller")
            vm.Delete(vm1.name, True)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def testVMSpec():
    """Time the environment-browser queries used when building a VM spec."""
    clock = StopWatch()
    envBrowser = GetEnv()
    clock.finish("getEnv")
    clock = StopWatch()
    cfgOption = envBrowser.QueryConfigOption(None, None)
    clock.finish("queryConfigOption")
    clock = StopWatch()
    cfgTarget = envBrowser.QueryConfigTarget(None)
    clock.finish("queryConfigTarget")
    clock = StopWatch()
    # Spec construction itself is currently disabled:
    # vmSpec = Vim.Vm.ConfigSpec()
    # vmSpec = vmconfig.AddScsiCtlr(vmSpec, cfgOption, cfgTarget)
    # print(vmSpec)
    clock.finish("testVMSpec")
def main():
    """Drive VirtualDiskManagerTest for numiter iterations, recording a
    per-iteration PASS/FAIL in resultsArray and logging a summary."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["d:", "ds="], None, "Datastore name", "ds"),
        (["r:", "rdm="], None, "Device path used in rdm creation", "rdm"),
        (["n:", "nas="], None,
         "Nas datastore creation info format:'host:share:dsname'", "nas"),
        # (["s:", "subdir="], "testvdm/", "Subdirectory in selected datastore as "
        #  "possible destination for disks'", "subdir"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["cleanup", "c"], True,
         "Try to cleanup test vms from previous runs", "cleanup"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    Log("Connected to host " + args.GetKeyValue("host"))

    # Process command line
    numiter = int(args.GetKeyValue("iter"))
    doCleanup = args.GetKeyValue("cleanup")
    status = "PASS"
    resultsArray = []

    serviceInstanceContent = si.RetrieveContent()
    vdiskMgr = serviceInstanceContent.GetVirtualDiskManager()
    hostSystem = host.GetHostSystem(si)
    hostConfigManager = hostSystem.GetConfigManager()
    # Published for helpers elsewhere in this module.
    global datastoreSystem
    datastoreSystem = hostConfigManager.GetDatastoreSystem()
    if vdiskMgr is None:
        Log("Virtual Disk Manager not found")
        sys.exit(0)

    for i in range(numiter):
        bigClock = StopWatch()
        try:
            try:
                ph = Phase()
                vdiskMgrTest = VirtualDiskManagerTest(vdiskMgr, args)
                vdiskMgrTest.RunTests()
                ph.SetPhase("Virtual Disk Manager Tests")
                status = "PASS"
            finally:
                bigClock.finish("iteration " + str(i))
        # While debugging, uncomment the line below to see backtraces
        # when an exception occurs.
        except Exception as e:
            Log("Caught exception : " + str(e))
            status = "FAIL"
        Log("TEST RUN COMPLETE: " + status)
        resultsArray.append(status)
    Log("Results for each iteration: ")
    for i in range(len(resultsArray)):
        Log("Iteration " + str(i) + ": " + resultsArray[i])
def main():
    """Hw7 SCSI controller hot-plug test: create a Hw7 VM, power it on, run
    TestHotPlugScsiCtlr against it, then delete it; repeats numiter times."""
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # NOTE(review): "user" tuple was corrupted by secret scrubbing;
        # restored to the conventional form.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:", "vmname="], "Hw7HotPlugTest",
         "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            Log("Cleaning up VMs from previous runs...")
            vm.Delete(vmname, True)
            ## Positive tests on a hwVersion 7 VM
            Log("Creating Hw7 VM..")
            vm1 = vm.CreateQuickDummy(vmname, vmxVersion="vmx-07",
                                      memory=4, guest="rhel5Guest")
            Log("Powering on VM " + vm1.GetConfig().GetName())
            vm.PowerOn(vm1)
            # Test hot plug of multiple devices
            TestHotPlugScsiCtlr(vm1)
            Log("Deleting VM")
            vm.Delete(vmname, True)
            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def SetPhase(self, msg):
    """Log and time the phase that just finished, then start the next one."""
    Log("Phase " + str(self.phaseNumber) + ": " + msg + " completed")
    self.phaseClock.finish("phase " + str(self.phaseNumber))
    # Fresh stopwatch for the next phase.
    self.phaseClock = StopWatch()
    self.phaseNumber += 1
def testLinkedClone(si, numiter, deltaDiskFormat, backingType, vmxVersion,
                    ds1, ds2, status, resultsArray):
    """Exercise linked-clone delta-disk chains across VMs and datastores.

    Per iteration: builds a parent VM1 (two disks on ds1) with a snapshot
    tree S1 / S1-C1 / S1-C1-C1 / S1-C2, then creates child VMs VM2 and VM3
    on ds2 whose disks are deltas off VM1's snapshots, chains a VM2 disk
    off a VM3 snapshot, and runs a series of delete/destroy/consolidate
    operations (OP1-OP7) verifying that shared parent disks survive until
    no child references them and that no orphaned files remain.

    Appends "PASS"/"FAIL" to resultsArray for each iteration.

    NOTE(review): semantics of the project helpers (vm, vmconfig, vimutil,
    folder, invt, host, PowerCycle, GetBackingInfo, llpm) are assumed from
    their call sites here -- confirm against their definitions.
    """
    for i in range(numiter):
        bigClock = StopWatch()
        try:
            try:
                # Remove any leftover parent VM from a previous run.
                vm1Name = "LinkedParent_" + str(i)
                vm1 = folder.Find(vm1Name)
                if vm1 != None:
                    Log("Cleaning up old vm with name: " + vm1Name)
                    vm1.Destroy()

                # Create a simple vm with nothing but two disk on ds1
                vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=2, \
                                          datastoreName=ds1, diskSizeInMB=1, \
                                          vmxVersion=vmxVersion, \
                                          backingType=backingType)
                Log("Created parent VM1 --" + vm1Name + "with Native snapshotting" +
                    " capability set to " + str(vm1.IsNativeSnapshotCapable()))
                vm1DirName = vm1.config.files.snapshotDirectory

                # Create snapshots
                # S1, S1C1, S1C1C1 and S1C2
                vm.CreateSnapshot(vm1, "S1", "S1 is the first snaphost", \
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Create Snapshot S1 for VM1")

                vm.CreateSnapshot(vm1, "S1-C1", "S1-C1 is the first child of S1",\
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1C1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Create Snapshot S1C1 for VM1")

                vm.CreateSnapshot(vm1, "S1-C1-C1", \
                                  "S1-C1-C1 is the grand child of S1", \
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1C1C1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Create Snapshot S1C1C1 for VM1")

                # revert to S1
                vimutil.InvokeAndTrack(S1Snapshot.Revert)
                Log("Reverted VM1 to Snapshot S1C1")

                # S1-C2 branches off S1 (we just reverted to S1).
                vm.CreateSnapshot(vm1, "S1-C2", \
                                  "S1-C2 is the second child of S1", False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1C2Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Create Snapshot S1C2 for VM1")

                # revert to S1C1C1, so it is the current snapshot
                vimutil.InvokeAndTrack(S1C1C1Snapshot.Revert)
                Log("Reverted VM1 to Snapshot S1C1C1")

                # Get the name of the parent disks
                disks = vmconfig.CheckDevice(S1C2Snapshot.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)
                if len(disks) != 2:
                    raise Exception("Failed to find parent disk1")
                parentDisk1 = disks[0].GetBacking().GetFileName()

                disks = vmconfig.CheckDevice(S1C1C1Snapshot.GetConfig(),
                                             Vim.Vm.Device.VirtualDisk)
                if len(disks) != 2:
                    raise Exception("Failed to find parent disk2")
                parentDisk2 = disks[1].GetBacking().GetFileName()

                # Create a VM2 on ds2 that is linked off S1C2
                vm2Name = "LinkedChild1_" + str(i)
                configSpec = vmconfig.CreateDefaultSpec(name = vm2Name,
                                                        datastoreName = ds2)
                configSpec = vmconfig.AddScsiCtlr(configSpec)
                configSpec = vmconfig.AddScsiDisk(configSpec, datastorename = ds2,
                                                  capacity = 1024,
                                                  backingType = backingType)
                configSpec.SetVersion(vmxVersion)
                # deviceChange[1] is the disk added above ([0] is the SCSI
                # controller); point its backing at VM1's snapshot disk to
                # make it a linked-clone delta.
                childDiskBacking = configSpec.GetDeviceChange()[1].GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisk1)
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)
                resPool = invt.GetResourcePool()
                vmFolder = invt.GetVmFolder()
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
                vm2 = folder.Find(vm2Name)
                Log("Created child VM2 --" + vm2Name)
                vm2DirName = vm2.config.files.snapshotDirectory

                # Create a VM3 on ds2 that is linked off S1C1C1
                # (reuses the same configSpec with a new name and parent).
                vm3Name = "LinkedChild2_" + str(i)
                configSpec.SetName(vm3Name)
                parentBacking.SetFileName(parentDisk2)
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
                vm3 = folder.Find(vm3Name)
                Log("Created child VM3 --" + vm3Name)
                vm3DirName = vm3.config.files.snapshotDirectory

                # Create snapshot VM3S1 for VM3
                vm.CreateSnapshot(vm3, "VM3S1", "VM3S1 is VM3 snaphost", False, False)
                Log("Create Snapshot VM3S1 for VM3")

                # Create snapshot VM3S2 for VM3
                vm.CreateSnapshot(vm3, "VM3S2", "VM3S2 is VM3 snaphost", False, False)
                Log("Create Snapshot VM3S2 for VM3")
                snapshotInfo = vm3.GetSnapshot()
                VM3S2Snapshot = snapshotInfo.GetCurrentSnapshot()

                # get the disk name of VM3S2 so it can be configured as a
                # parent disk for VM2
                disks = vmconfig.CheckDevice(VM3S2Snapshot.GetConfig(),
                                             Vim.Vm.Device.VirtualDisk)
                if len(disks) != 1:
                    raise Exception("Failed to find parent disk2")
                parentDisk3 = disks[0].GetBacking().GetFileName()

                # create a delta disk off VM3S2 on VM2
                Log("Adding delta disk off VM3S2 to VM2")
                configSpec = Vim.Vm.ConfigSpec()
                configSpec = vmconfig.AddScsiDisk(configSpec, \
                                                  datastorename = ds2, \
                                                  cfgInfo = vm2.GetConfig(), \
                                                  backingType = backingType)
                childDiskBacking = configSpec.GetDeviceChange()[0].GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisk3)
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)
                vimutil.InvokeAndTrack(vm2.Reconfigure, configSpec)

                # Sanity: all three VMs must power-cycle with their chains.
                Log("Power cycle VM1...")
                PowerCycle(vm1)
                Log("Power cycle VM2...")
                PowerCycle(vm2)
                Log("Power cycle VM3...")
                PowerCycle(vm3)

                Log("OP1: delete VM1.S1C2, then power cycle VM2")
                vimutil.InvokeAndTrack(S1C2Snapshot.Remove, True)
                PowerCycle(vm2)

                Log("OP2: destroy VM2, power cycle VM1")
                vimutil.InvokeAndTrack(vm2.Destroy)
                PowerCycle(vm1)

                Log("then recreate VM2 with just disk1")
                configSpec = vmconfig.CreateDefaultSpec(name = vm2Name, \
                                                        datastoreName = ds2)
                configSpec = vmconfig.AddScsiCtlr(configSpec)
                configSpec = vmconfig.AddScsiDisk(configSpec, datastorename = ds2, \
                                                  capacity = 1024, \
                                                  backingType = backingType)
                configSpec.SetVersion(vmxVersion)
                childDiskBacking = configSpec.GetDeviceChange()[1].GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisk1)
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)
                resPool = invt.GetResourcePool()
                vmFolder = invt.GetVmFolder()
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
                vm2 = folder.Find(vm2Name)
                Log("ReCreated child VM2 --" + vm2Name)

                Log("OP3: delete VM3S2, power cycle VM1, revert to S1C1")
                vimutil.InvokeAndTrack(VM3S2Snapshot.Remove, True)
                vimutil.InvokeAndTrack(S1C1Snapshot.Revert)
                PowerCycle(vm1)

                # Low-level provisioning manager for disk-level operations.
                llpm = si.RetrieveInternalContent().GetLlProvisioningManager()
                Log("OP4: refresh VM2 disk and destroy the disk and its parent")
                llpm.ReloadDisks(vm2, ['currentConfig', 'snapshotConfig'])
                disks = vmconfig.CheckDevice(vm2.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)
                diskChain1 = disks[0]
                # Truncate the chain two levels deep so the destroy only
                # covers the disk and its immediate parent.
                diskChain1.backing.parent.parent = None
                configSpec = Vim.Vm.ConfigSpec()
                configSpec = vmconfig.RemoveDeviceFromSpec(configSpec, \
                                                           diskChain1, "destroy")
                configSpec.files = vm2.config.files
                llpm.ReconfigVM(configSpec)
                Log("verify only the immediate parent is deleted")
                PowerCycle(vm1)

                Log("OP5: destroy VM1, power cycle VM3")
                vimutil.InvokeAndTrack(vm1.Destroy)
                PowerCycle(vm3)

                Log("OP6: Consolidate VM3 disk chain")
                disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)
                # Consolidating across a snapshot point must be rejected.
                shouldHaveFailed = 0
                try:
                    task = llpm.ConsolidateDisks(vm3, disks)
                    WaitForTask(task)
                except Exception as e:
                    shouldHaveFailed = 1
                    Log("Hit an exception when trying to consolidate cross " \
                        "snapshot point.")
                if shouldHaveFailed != 1:
                    raise Exception("Error: allowed consolidation to merge snapshot")
                # Build a valid consolidation request: two views of the same
                # chain, the second starting two parents up.
                diskchain1 = disks[0]
                diskchain1.backing.parent.parent = None
                disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)
                diskchain2 = disks[0]
                diskchain2.backing = diskchain2.backing.parent.parent
                disks = []
                disks.append(diskchain1)
                disks.append(diskchain2)
                vimutil.InvokeAndTrack(llpm.ConsolidateDisks, vm3, disks)
                PowerCycle(vm3)

                Log("OP7: destroy VM2, no orphaned disks/files should have left")
                vimutil.InvokeAndTrack(vm2.Destroy)

                Log("Delete snapshot of VM3, and delete the disk with all parent."
                    "then destroy vM3, no orphaned disks/files should have left")
                disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)
                diskChain1 = disks[0]
                configSpec = Vim.Vm.ConfigSpec()
                configSpec = vmconfig.RemoveDeviceFromSpec(configSpec, \
                                                           diskChain1, "destroy")
                configSpec.files = vm3.config.files
                vimutil.InvokeAndTrack(llpm.ReconfigVM, configSpec)
                vimutil.InvokeAndTrack(vm3.Destroy)

                # All three VM directories should now be gone; each Search
                # must raise FileNotFound.
                hostSystem = host.GetHostSystem(si)
                b = hostSystem.GetDatastoreBrowser()
                shouldHaveFailed = 0
                try:
                    vimutil.InvokeAndTrack(b.Search, vm1DirName)
                except Vim.Fault.FileNotFound:
                    Log("Caught " + vm1DirName + "Not found as expected")
                    shouldHaveFailed += 1
                try:
                    vimutil.InvokeAndTrack(b.Search, vm2DirName)
                except Vim.Fault.FileNotFound:
                    Log("Caught " + vm2DirName + "Not found as expected")
                    shouldHaveFailed += 1
                try:
                    vimutil.InvokeAndTrack(b.Search, vm3DirName)
                except Vim.Fault.FileNotFound:
                    Log("Caught " + vm3DirName + "Not found as expected")
                    shouldHaveFailed += 1
                if shouldHaveFailed != 3:
                    Log("Failed, orphaned disks left")
                    raise Exception("orphaned disks")
                status = "PASS"
            finally:
                bigClock.finish("iteration " + str(i))
        except Exception as e:
            Log("Caught exception : " + str(e))
            status = "FAIL"
        Log("TEST RUN COMPLETE: " + status)
        resultsArray.append(status)
    Log("Results for each iteration: ")
    for i in range(len(resultsArray)):
        Log("Iteration " + str(i) + ": " + resultsArray[i])
def main():
    """Entry point for the RDM (raw device mapping) virtual disk test.

    Creates a VM whose config spec carries a physical-mode RDM disk
    backed directly by a raw device, then reconfigures the VM to add a
    second RDM disk backed by an existing mapping file.  Logs PASS/FAIL
    at the end.
    """
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     # NOTE(review): '"user="], "root"' reconstructed -- the
                     # original text was corrupted to '"user="******"root"'
                     # (redaction artifact, not valid Python).
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["d:", "disk="], "/vmfs/devices/", "Disk", "disk"),
                     (["s:", "ds="], "storage1", "Datastore 1", "ds"),
                     (["f:", "file="], "[datastore1] rdm/rdm.vmdk", "Virtual Disk", "file"),
                     (["v:", "vmname="], "RdmVM", "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter")]

    supportedToggles = [(["usage", "help"], False, "Show usage information", "usage"),
                        (["runall", "r"], True, "Run all the tests", "runall"),
                        (["nodelete"], False, "Dont delete vm on completion", "nodelete")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")      # parsed but currently unused
    noDelete = args.GetKeyValue("nodelete")  # parsed but currently unused
    disk = args.GetKeyValue("disk")
    ds = args.GetKeyValue("ds")
    rdmDiskFile = args.GetKeyValue("file")

    status = "PASS"
    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            ## Cleanup old VMs
            vm1 = folder.Find(vmname)
            if vm1 != None:
                vm1.Destroy()

            Log("Creating VM: " + str(vmname))

            ## Add scsi disk
            Log("Adding a new rdm disk to VM: " + str(vmname))
            cspec = vmconfig.CreateDefaultSpec(name=vmname, datastoreName=ds)
            cspec = vmconfig.AddScsiCtlr(cspec)

            # Get config options and targets
            cfgOption = vmconfig.GetCfgOption(None)
            cfgTarget = vmconfig.GetCfgTarget(None)

            # First disk: physical-mode RDM backed by the raw device itself.
            rdmBacking = Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo()
            rdmBacking.SetFileName("")
            rdmBacking.SetDeviceName(disk)
            rdmBacking.SetCompatibilityMode("physicalMode")
            rdmBacking.SetDiskMode("")
            rdmBacking.SetParent(None)

            diskDev = Vim.Vm.Device.VirtualDisk()
            diskDev.SetKey(vmconfig.GetFreeKey(cspec))
            diskDev.SetBacking(rdmBacking)
            ctlrs = vmconfig.GetControllers(cfgOption,
                                            Vim.Vm.Device.VirtualSCSIController,
                                            None, cspec)
            # XXX Fix this up
            for ctlrIdx in range(len(ctlrs)):
                freeSlot = vmconfig.GetFreeSlot(cspec, None, cfgOption,
                                                ctlrs[ctlrIdx])
                if (freeSlot >= 0):
                    diskDev.SetControllerKey(ctlrs[ctlrIdx].GetKey())
                    diskDev.SetUnitNumber(-1)
                    # Plain int: long() is Python-2-only; the value is the
                    # same on both runtimes.
                    diskDev.SetCapacityInKB(4096)
                    break
            vmconfig.AddDeviceToSpec(cspec, diskDev,
                                     Vim.Vm.Device.VirtualDeviceSpec.Operation.add,
                                     Vim.Vm.Device.VirtualDeviceSpec.FileOperation.create)

            Log("create VM: " + str(vmname) + " with the RDM disk")
            vmFolder = vm.GetVmFolder()
            resPool = vm.GetResourcePool()
            task = vmFolder.CreateVm(cspec, resPool)
            WaitForTask(task)
            # (message fixed: this step creates, it does not reconfigure)
            Log("Finished creating VM: " + str(vmname))
            vm1 = task.info.result

            Log("Now reconfiguring VM: " + str(vmname))
            cspec = Vim.Vm.ConfigSpec()
            # Second disk: RDM backed by an existing mapping file.
            rdmBacking = Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo()
            rdmBacking.SetFileName(rdmDiskFile)
            rdmBacking.SetCompatibilityMode("physicalMode")
            rdmBacking.SetDiskMode("persistent")
            rdmBacking.SetParent(None)

            diskDev = Vim.Vm.Device.VirtualDisk()
            diskDev.SetKey(vmconfig.GetFreeKey(cspec))
            diskDev.SetBacking(rdmBacking)
            ctlrs = vmconfig.GetControllers(cfgOption,
                                            Vim.Vm.Device.VirtualSCSIController,
                                            vm1.GetConfig(), cspec)
            # XXX Fix this up
            for ctlrIdx in range(len(ctlrs)):
                freeSlot = vmconfig.GetFreeSlot(cspec, vm1.GetConfig(),
                                                cfgOption, ctlrs[ctlrIdx])
                if (freeSlot >= 0):
                    diskDev.SetControllerKey(ctlrs[ctlrIdx].GetKey())
                    diskDev.SetUnitNumber(-1)
                    diskDev.SetCapacityInKB(4096)
                    break
            vmconfig.AddDeviceToSpec(cspec, diskDev,
                                     Vim.Vm.Device.VirtualDeviceSpec.Operation.add,
                                     Vim.Vm.Device.VirtualDeviceSpec.FileOperation.create)
            vm.Reconfigure(vm1, cspec)
            # NOTE(review): the original then called
            # vmFolder.ReconfigVm(cspec, resPool), but Folder has no such
            # method and the VM was already reconfigured on the line above,
            # so the call only raised AttributeError and failed every
            # iteration.  Removed.
            Log("Finished Reconfiguring VM: " + str(vmname))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
    Log("TEST RUN COMPLETE: " + status)
def __init__(self):
    """Start phase tracking: phase counter at zero, stopwatch running."""
    self.phaseClock = StopWatch()
    self.phaseNumber = 0