def CleanupVm(vmname, useLlpm=False):
    """Power off and destroy all VMs matching *vmname*.

    :param vmname: either a vim.VirtualMachine object (cleans up exactly
                   that VM) or a name prefix string (cleans up every VM
                   whose name starts with it).
    :param useLlpm: when True, delete via the low-level provisioning
                    manager instead of the normal vm.Destroy path.
    """
    with LogSelfOp() as logOp:
        if isinstance(vmname, vim.VirtualMachine):
            Log("Cleaning up VMs with name %s" % vmname.name)
            oldVms = [vmname]
        else:
            Log("Cleaning up VMs with name %s" % vmname)
            oldVms = folder.FindPrefix(vmname)
        for oldVm in oldVms:
            if oldVm.GetRuntime().GetPowerState() == PowerState.poweredOn:
                vm.PowerOff(oldVm)
            ftInfo = oldVm.config.ftInfo
            if ftInfo and ftInfo.role == 1:
                # If the VM is a primary, unregister all secondaries
                # before deleting the VM.
                ftMgr = host.GetFaultToleranceMgr(connect.GetSi())
                WaitForTask(ftMgr.UnregisterSecondary(oldVm, None))
            Log("Destroying VM")
            if useLlpm:  # fixed: idiomatic truth test instead of '== True'
                vmConfig = oldVm.GetConfig()
                hw = vmConfig.GetHardware()
                # fixed: the extra truthiness guard was redundant —
                # equality with "recordReplay" is False for None anyway.
                if vmConfig.flags.faultToleranceType == "recordReplay":
                    hw.SetDevice([])
                    vmConfig.SetHardware(hw)
                llpm = invt.GetLLPM()
                llpm.DeleteVm(vmConfig)
            else:
                vm.Destroy(oldVm)
def WaitForPowerState(vm, si, powerState, nsec=20):
    """Poll (up to *nsec* seconds) until *vm* reaches *powerState*.

    Temporarily switches the active service instance to *si* while
    polling, and restores the previous one afterwards.

    :raises Exception: if the VM has not reached *powerState* in time.
    """
    saveSi = connect.GetSi()
    connect.SetSi(si)
    try:
        for _ in range(nsec):
            if vm.GetRuntime().GetPowerState() == powerState:
                break  # fixed: stop polling once the state is reached
            time.sleep(1)
        if vm.GetRuntime().GetPowerState() != powerState:
            raise Exception("VM did not transition to expected power state!")
    finally:
        # fixed: original left the switched service instance active when
        # the timeout exception was raised.
        connect.SetSi(saveSi)
def CheckFTState(vm, state, si=None, isPrimary=True):
    """Verify that *vm*'s runtime fault-tolerance state equals *state*.

    :param si: optional service instance to switch to for the check;
               the previous one is restored before returning.
    :param isPrimary: accepted for interface compatibility; not used here.
    :raises Exception: if the runtime FT state differs from *state*.
    """
    prevSi = None
    if si is not None:
        prevSi = connect.GetSi()
        connect.SetSi(si)
    try:
        ftState = vm.GetRuntime().GetFaultToleranceState()
        if ftState != state:
            raise Exception(
                "Runtime FT state " + str(ftState) +
                " not set to " + str(state))
        Log("Verified runtime fault tolerance state as " + str(state))
    finally:
        # fixed: original saved prevSi but never restored it, leaving the
        # switched service instance active for subsequent callers.
        if prevSi is not None:
            connect.SetSi(prevSi)
def ReloadSecondary(si, vm1):
    """Reload the secondary VM *vm1* on service instance *si* and verify
    it is still findable (i.e. the reload did not invalidate it).

    :raises Exception: if the VM cannot be found after the reload.
    """
    with LogSelfOp() as logOp:
        curSi = connect.GetSi()
        connect.SetSi(si)
        try:
            vmname = vm1.GetConfig().GetName()
            Log("Reloading secondary VM")
            vm1.Reload()
            vm2 = folder.Find(vmname)
            if vm2 is None:
                raise Exception("Reload caused the VM to go invalid")
        finally:
            # fixed: original skipped restoring the service instance when
            # Reload/Find raised or the VM went invalid.
            connect.SetSi(curSi)
def WaitForPowerState(vm, si, powerState, nsec=40):
    """Poll (up to *nsec* seconds) until *vm* reaches *powerState*.

    Temporarily switches the active service instance to *si* while
    polling, and restores the previous one afterwards.

    :raises Exception: if the VM has not reached *powerState* in time.
    """
    with LogSelfOp() as logOp:
        saveSi = connect.GetSi()
        connect.SetSi(si)
        try:
            for _ in range(nsec):
                if vm.GetRuntime().GetPowerState() == powerState:
                    break  # fixed: stop polling once the state is reached
                time.sleep(1)
            if vm.GetRuntime().GetPowerState() != powerState:
                raise Exception("%s: VM did not transition to expected power state!" %
                                DescribeVm(vm))
        finally:
            # fixed: original left the switched service instance active
            # when the timeout exception was raised.
            connect.SetSi(saveSi)
def CleanupDir(datastoreName, folderName):
    """Best-effort deletion of *folderName* on datastore *datastoreName*.

    A missing file is silently ignored; any other failure is logged and
    swallowed so callers can proceed with cleanup.
    """
    serviceInstance = connect.GetSi()
    fileManager = serviceInstance.content.fileManager
    dsPath = "[%s] %s" % (datastoreName, folderName)
    try:
        deleteTask = fileManager.Delete(
            datastorePath=dsPath,
            fileType=vim.FileManager.FileType.File)
        WaitForTask(deleteTask)
    except vim.fault.FileNotFound:
        # Already gone — nothing to clean up.
        pass
    except Exception as e:
        Log("Error cleaning up %s: %s" % (folderName, e))
def main():
    """Entry point: parse CLI options, connect to VC, run the benchmarks,
    and always log out of the session on the way out.
    """
    optionSpecs = (
        (('-v', '--vc'), dict(dest='vc', default='localhost',
                              help='VC to connect to')),
        (('-u', '--user'), dict(dest='user', default='root',
                                help='User name')),
        (('-p', '--password'), dict(dest='password', default='vmware',
                                    help='Password')),
        (('-e', '--esx'), dict(dest='host', help='Host name')),
        (('-m', '--vm'), dict(dest='vm', help='VM name')),
        (('-i', '--iteration'), dict(dest='iteration', default='1000',
                                     help='Number of iterations')),
        (('-b', '--bench'), dict(dest='bench',
                                 help='comma-separated benchmark names')),
    )
    parser = OptionParser()
    for flags, kwargs in optionSpecs:
        parser.add_option(*flags, **kwargs)
    options, _ = parser.parse_args()

    connect.Connect(host=options.vc, user=options.user,
                    pwd=options.password, version='vpx.version.version9')
    si = connect.GetSi()
    if si is None:
        return

    vpxSi = Vpx.ServiceInstance("VpxdInternalServiceInstance", si._GetStub())
    benchMgr = vpxSi.debugManager.benchmarkManager
    host = FindHostByName(si, options.host)  # could be None
    vm = FindVmByName(si, options.vm)        # could be None
    nanobench = NanoBench(benchMgr, options.iteration, host, vm)
    try:
        nanobench.setRunList(options.bench)
        nanobench.run()
    finally:
        si.content.sessionManager.Logout()
def build_task_filter(task):
    """Build a property filter that watches a single task object.

    The filter registers the task with the session's property collector
    so that WaitForUpdates() reports changes to all of its properties.
    The caller owns the returned filter and must Destroy() it.

    :type task: vim.Task
    :param task: the task object to monitor
    :rtype vim.PropertyFilter: property filter for this object
    """
    pc = connect.GetSi().content.propertyCollector
    obj_spec = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)]
    # all=True: collect every property of the task, not just a pathSet.
    prop_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
                                                           pathSet=[],
                                                           all=True)
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.objectSet = obj_spec
    filter_spec.propSet = [prop_spec]
    # fixed: renamed local from 'filter' to avoid shadowing the builtin.
    task_filter = pc.CreateFilter(filter_spec, True)
    return task_filter
def __init__(self):
    """Initialize the thread and register a property-collector filter
    that tracks the "info" property of TaskManager.recentTask entries.
    """
    threading.Thread.__init__(self)
    si = connect.GetSi()
    self.sc = si.RetrieveContent()
    self.pc = self.sc.GetPropertyCollector()
    taskManager = self.sc.GetTaskManager()

    # Root the filter at TaskManager itself, but skip it — we only want
    # the objects reached through the traversal below.
    objSpec = Vmodl.Query.PropertyCollector.ObjectSpec()
    objSpec.SetObj(taskManager)
    objSpec.SetSkip(True)

    # Tasks are listed by the "recentTask" array property of TaskManager.
    traversal = Vmodl.Query.PropertyCollector.TraversalSpec()
    traversal.SetName("traverseTasks")
    traversal.SetPath("recentTask")
    traversal.SetSkip(False)
    traversal.SetType(taskManager.__class__)
    objSpec.SetSelectSet([traversal])

    # In a Task, we are interested only in the info property.
    propSpec = Vmodl.Query.PropertyCollector.PropertySpec()
    propSpec.SetType(Vim.Task)
    propSpec.SetPathSet(["info"])

    filterSpec = Vmodl.Query.PropertyCollector.FilterSpec()
    filterSpec.SetObjectSet([objSpec])
    filterSpec.SetPropSet([propSpec])

    # Create the task filter.
    self.taskFilter = self.pc.CreateFilter(filterSpec, True)

    # Dictionary of known task IDs to states, guarded by mapLock.
    self.taskMap = {}
    self.mapLock = threading.Lock()
def CleanupVm(vmname):
    """Power off and destroy every FT-primary VM (ftInfo.role == 1)
    whose name starts with *vmname*. VMs that vanish mid-cleanup are
    ignored.
    """
    si = connect.GetSi()
    Log("Cleaning up VMs with name " + vmname)
    for candidate in folder.FindPrefix(vmname):
        try:
            # Skip anything without FT info or that is not a primary.
            if candidate is None or candidate.config is None:
                continue
            ftInfo = candidate.config.ftInfo
            if ftInfo is None or ftInfo.role != 1:
                continue
            powerState = candidate.GetRuntime().GetPowerState()
            if powerState == vim.VirtualMachine.PowerState.poweredOn:
                vm.PowerOff(candidate)
            Log("Destroying VM")
            cfg = candidate.GetConfig()
            hardware = cfg.GetHardware()
            hardware.SetDevice([])
            cfg.SetHardware(hardware)
            vm.Destroy(candidate)
        except vmodl.fault.ManagedObjectNotFound:
            # VM disappeared while we worked on it — nothing left to do.
            pass
def CleanupTestVMs(pcThread):
    """Power off (where needed) and destroy every VM in the inventory,
    waiting for task completion via *pcThread*'s update tracking.
    """
    content = connect.GetSi().GetContent()
    vmView = content.GetViewManager().CreateContainerView(
        content.rootFolder, [Vim.VirtualMachine], True)
    vms = vmView.GetView()
    Log("main", "Cleanup %d test VMs" % len(vms))

    # Kick off all power-offs first, then wait for each one.
    powerOffTasks = [v.PowerOff() for v in vms if vm.IsPoweredOn(v)]
    for task in powerOffTasks:
        pcThread.WaitForTaskUpdate(task)

    # Same pattern for the destroys.
    destroyTasks = [v.Destroy() for v in vms]
    for task in destroyTasks:
        pcThread.WaitForTaskUpdate(task)
def wait_for_task(task, *args, **kwargs):
    """A helper method for blocking 'wait' based on the task class.

    This dynamic helper allows you to call .wait() on any task to keep the
    python process from advancing until the task is completed on the
    vCenter or ESX host on which the task is actually running.

    Usage Examples
    ==============

    Intended to be dynamically injected into the vim.Task object, but may
    be used free-standing. Given an initial call similar to this...

    code::

        rename_task = datastore.Rename('new_name')

    simple use case::

        rename_task.wait()

    The main python process will block until the task completes on vSphere.

    use with callbacks::

        def output(task, *args):
            print task.info.state

        rename_task.wait(queued=output, running=output,
                         success=output, error=output)

    Only on observed task status transition will the callback fire.

    :type task: vim.Task
    :param task: any subclass of the vim.Task object
    :rtype None: returns or raises exception
    :raises vim.RuntimeFault:
    """
    def no_op(task, *args):
        pass

    queued_callback = kwargs.get('queued', no_op)
    running_callback = kwargs.get('running', no_op)
    success_callback = kwargs.get('success', no_op)
    error_callback = kwargs.get('error', no_op)

    si = connect.GetSi()
    pc = si.content.propertyCollector
    # fixed: renamed local from 'filter' to avoid shadowing the builtin.
    task_filter = build_task_filter(task)
    try:
        version, state = None, None
        # Loop looking for updates till the state moves to a completed
        # state (success ends the loop; error raises).
        waiting = True
        while waiting:
            update = pc.WaitForUpdates(version)
            version = update.version
            for filterSet in update.filterSet:
                for objSet in filterSet.objectSet:
                    task = objSet.obj
                    for change in objSet.changeSet:
                        if change.name == 'info':
                            state = change.val.state
                        elif change.name == 'info.state':
                            state = change.val
                        else:
                            continue

                        if state == vim.TaskInfo.State.success:
                            success_callback(task, *args)
                            waiting = False
                        elif state == vim.TaskInfo.State.queued:
                            queued_callback(task, *args)
                        elif state == vim.TaskInfo.State.running:
                            running_callback(task, *args)
                        elif state == vim.TaskInfo.State.error:
                            error_callback(task, *args)
                            raise task.info.error
    finally:
        if task_filter:
            task_filter.Destroy()
def GetContainerObjects(container, types, recursive):
    """Return all managed objects of the given *types* under *container*.

    :param container: inventory root for the view
    :param types: list of managed-object types to collect
    :param recursive: whether to descend into sub-containers
    """
    viewMgr = connect.GetSi().GetContent().GetViewManager()
    view = viewMgr.CreateContainerView(container, types, recursive)
    try:
        return view.GetView()
    finally:
        # fixed: original leaked the container view if GetView() raised.
        view.Destroy()