def CheckFTRole(vm, role):
    """Verify that the VM's fault-tolerance role matches the expected role.

    @param vm   VM managed object exposing GetConfig()
    @param role expected FT role value

    Raises Exception if the VM has no FT info configured or its role
    differs from *role*.  All work is wrapped in a LogSelfOp scope so
    the check is recorded as a single logged operation.
    """
    with LogSelfOp():
        ftInfo = vm.GetConfig().GetFtInfo()
        # 'is None' is the idiomatic identity test (was '== None').
        if ftInfo is None:
            raise Exception("No FT info configured for this VM")
        if ftInfo.GetRole() != role:
            raise Exception("FT role of VM does not match " + str(role))
def WaitForFTRole(vm, role, nsec=120):
    """Poll once per second, for up to *nsec* seconds, until the VM
    reports fault-tolerance role *role*.

    Returns None as soon as the role matches; raises Exception if the
    role never matches within the timeout.
    """
    for _ in range(nsec):
        info = vm.GetConfig().GetFtInfo()
        if info and info.GetRole() == role:
            return
        time.sleep(1)
    raise Exception("FT role of VM does not match %s" % role)
def CheckFTRole(vm, role, si=None):
    """Verify that the VM's fault-tolerance role matches *role* and log success.

    NOTE(review): this redefines CheckFTRole with an extra, unused *si*
    parameter; at import time this definition shadows the earlier one.
    Confirm whether both variants are intended to coexist.

    @param vm   VM managed object exposing GetConfig()
    @param role expected FT role value
    @param si   unused; kept for interface compatibility

    Raises Exception if the VM has no FT info or its role differs.
    """
    ftInfo = vm.GetConfig().GetFtInfo()
    # 'is None' is the idiomatic identity test (was '== None').
    if ftInfo is None:
        raise Exception("No FT info configured for this VM")
    if ftInfo.GetRole() != role:
        raise Exception("FT role of VM does not match " + str(role))
    Log("Verified FT role of VM")
def FindVmByName(self, vmname):
    """Returns Vim.VirtualMachine managed object."""
    for candidate in self.GetVmList():
        try:
            # Config may disappear underneath us if the VM is being
            # unregistered; skip any VM that vanishes mid-lookup.
            config = candidate.GetConfig()
            if config is not None and config.GetName() == vmname:
                return candidate
        except Vmodl.Fault.ManagedObjectNotFound:
            pass
    return None
def delDisk(self, vm):
    # Remove (and destroy the backing files of) the VM's virtual disks,
    # then reconfigure the VM and wait for the task to finish.
    config = vm.GetConfig()
    configspec = Vim.Vm.ConfigSpec()
    deviceChange = []
    for disk in config.GetHardware().GetDevice():
        if isinstance(disk, Vim.Vm.Device.VirtualDisk):
            # NOTE(review): 'break' aborts the whole scan at the first
            # disk whose unit number is 4, so that disk AND every device
            # after it are left untouched.  Confirm 'continue' (skip just
            # unit 4) was not the intent.
            if disk.GetUnitNumber() == 4:
                break
            # Build a remove-and-destroy spec for this disk.
            diskSpec = Vim.Vm.Device.VirtualDeviceSpec()
            diskSpec.SetOperation("remove")
            diskSpec.SetFileOperation("destroy")
            diskSpec.SetDevice(disk)
            deviceChange.append(diskSpec)
    configspec.SetDeviceChange(deviceChange)
    print "Reconfigure spec: ", configspec
    task = vm.Reconfigure(configspec)
    WaitForTask(task)
def CreateProtectedVM(ovfuri, priHostd, vmname, datastore, stdchurn=False,
                      churnrate=None, disks=[], cpfiles=[], script=None,
                      stdblast=False,
                      baseDiskType=Vim.VirtualDiskManager.VirtualDiskType.thin,
                      snapDepth=0, skipDisks=[]):
    """
    Common routine for creating a 'churn' VM and configuring it to run a
    simple startup script.

    @param[in] priHostd primary hostd connection object
    @param[in] vmname The name of the VM (used for displayname and vmx
               config file name, too)
    @param[in] datastore The name of the datastore to put the VM's files
               on (in priHostd)
    @param[in] scorch A bool indicating if any existing destDir should be
               removed
    @param[in] stdchurn A bool indicating if a "standard" churn
               configuration should be generated
    @param[in] churnrate A string "KB/min[,KBkeep][,RandomY/N][,Unmap %]
               [,Compress %]" describing the argument to diskchurn2.sh
    @param[in] disks An (optional) list of disks (given by a string size
               in GB, can be fractional) to attach to VM
    @param[in] cpfiles An (optional) list of arbitrary files to copy into
               a fixed directory in the VM
    @param[in] script An (optional) script to run
    @param[in] stdblast A bool indicating if a "standard" disk blast
               configuration should be generated
    @param[in] baseDiskType The disk type to use when creating the base
               disk.
    @param[in] snapDepth Make each disk a child disk this number of
               snapshots deep.

    Optional parameters may be none.  The disks specified are in addition
    to the 10MB base disk created for the VM.
    """
    # NOTE(review): baseDiskType and snapDepth are accepted but never
    # referenced in this body — confirm whether they are still needed.
    # NOTE(review): disks/cpfiles/skipDisks use mutable default arguments;
    # the body only rebinds (never mutates) them, so this is benign here.

    # Deploy the OVF (powered off) and resolve where its files live.
    deployment = ovflib.OVFManagerDeployment(ovfuri, priHostd)
    deployment.SetDatastore(datastore)
    deployment.SetName(vmname)
    vm = priHostd.GetPyVimVM(deployment.Deploy(powerOn=False))
    vmPathName = ParseDatastorePath(vm.GetConfig().GetFiles().GetVmPathName())
    destDir = os.path.dirname(vmPathName[1])
    priDs = priHostd.RemoteStorage(datastore)
    if not disks:
        disks = []
    if stdchurn or stdblast:
        # Add the parameters (if necesssary) to make a simple churn VM
        # A 1GB disk, the 'diskchurn.sh' script, and a command line that
        # runs diskchurn
        if not disks:
            disks = [ "1", ]  # one GB disk
        if stdchurn and stdblast:
            raise RuntimeError, "Can't do both churn and blast at the same time."
    devLabels = DevLabelsFor(disks, skipDisks)
    if stdchurn or stdblast:
        # Ship the churn/blast tool into the guest along with any extras.
        if stdchurn:
            cpfiles = cpfiles + [ FindChurnScript() ] + FindChurnExtras()
        else:
            cpfiles = cpfiles + [ FindBlastScript() ] + FindBlastExtras()
        if not script:
            # Build a default guest startup script from the churnrate
            # string, one tool invocation per attached disk label.
            if not churnrate:
                churnrate = ""
            churnInArgs=churnrate.split(",")
            churnOutArgs=""
            if stdchurn:
                #
                # Parse the churn arguments.
                #
                if len(churnInArgs) >= 1 and churnInArgs[0]:
                    churnOutArgs="%s -c %s" % (churnOutArgs, churnInArgs[0])
                if len(churnInArgs) >= 2 and churnInArgs[1]:
                    churnOutArgs="%s -s %s" % (churnOutArgs, churnInArgs[1])
                if len(churnInArgs) >= 3 and churnInArgs[2]:
                    if churnInArgs[2] == "y" or churnInArgs[2] == "Y":
                        churnOutArgs="%s -r" % churnOutArgs
                if len(churnInArgs) >= 4 and churnInArgs[3]:
                    churnOutArgs="%s -u %s" % (churnOutArgs, churnInArgs[3])
                if len(churnInArgs) >= 5 and churnInArgs[4]:
                    churnOutArgs="%s -k %s" % (churnOutArgs, churnInArgs[4])
                if len(churnInArgs) >= 6 and churnInArgs[5]:
                    churnOutArgs="%s -w %s" % (churnOutArgs, churnInArgs[5])
                if len(churnInArgs) >= 7 and churnInArgs[6]:
                    churnOutArgs="%s -y %s" % (churnOutArgs, churnInArgs[6])
                script = 'echo "Automated Disk Churnery:"\n' + \
                         'export PATH=".:${PATH}"\n' + \
                         'for disk in %s; do\n' % ' '.join(devLabels) + \
                         ' usleep 100\n' + \
                         ' diskchurn2.sh %s /dev/sd${disk} &\n' % (churnOutArgs) + \
                         'done\n' + \
                         'sleep 1\n' + \
                         'wait\n'
            else:
                #
                # Parse the blast arguments
                #
                if len(churnInArgs) >= 1 and churnInArgs[0]:
                    if churnInArgs[0] == "y" or churnInArgs[0] == "Y":
                        churnOutArgs="%s -r" % churnOutArgs
                if len(churnInArgs) >= 2 and churnInArgs[1]:
                    churnOutArgs="%s -u %s" % (churnOutArgs, churnInArgs[1])
                if len(churnInArgs) >= 3 and churnInArgs[2]:
                    churnOutArgs="%s -k %s" % (churnOutArgs, churnInArgs[2])
                script = 'echo "Automated Disk Blastery:"\n' + \
                         'export PATH=".:${PATH}"\n' + \
                         'for disk in %s; do\n' % ' '.join(devLabels) + \
                         ' usleep 100\n' + \
                         ' diskblast.sh %s /dev/sd${disk} &\n' % (churnOutArgs) + \
                         'done\n' + \
                         'sleep 1\n' + \
                         'wait\n'
    # Build the reconfigure spec: SCSI controller, data disks, and a boot
    # floppy that carries the script + copied files into the guest.
    cspec = Vim.Vm.ConfigSpec()
    envBrowser = priHostd.GetHostComputeResources().GetEnvironmentBrowser()
    cfgOption = envBrowser.QueryConfigOption(None, None)
    cfgTarget = envBrowser.QueryConfigTarget(None)
    cspec = pyVim.vmconfig.AddScsiCtlr(cspec, cfgOption=cfgOption,
                                       cfgTarget=cfgTarget,
                                       ctlrType='lsilogic')
    for disk in disks:
        # Disk sizes are given in (possibly fractional) GB strings.
        capacityInKB = long(float(disk) * 1024 * 1024)
        capacityInB = long(capacityInKB * 1024)
        cspec = pyVim.vmconfig.AddDisk(cspec, cfgOption=cfgOption,
                                       cfgTarget=cfgTarget, thin=True,
                                       capacity=capacityInKB,
                                       capacityInBytes=capacityInB,
                                       datastorename=datastore)
    floppy = "%s.floppy" % vmname
    cspec = pyVim.vmconfig.AddFloppy(cspec, cfgOption=cfgOption,
                                     cfgTarget=cfgTarget, type='image',
                                     backingName=priDs.DatastorePath(
                                         '{}/{}'.format(destDir, floppy)))
    localdir = tempfile.mkdtemp(prefix="hbrvmcfg.")
    # Create the ttylinux boot floppy image
    cmd = CreateTestVmFloppy(localdir, os.path.join(localdir, floppy),
                             script, cpfiles, devLabels)
    # Copy everything up to the host
    CopyFiles(localdir, priHostd.Hostname(), datastore, destDir,
              priHostd.Username(), priHostd.Password())
    vm.Reconfigure(cspec)
    # Clean up the local copy of the VM
    shutil.rmtree(localdir)
    return vm
def GetVirtualDisks(vm):
    """Return the list of VirtualDisk devices attached to *vm*."""
    devices = vm.GetConfig().GetHardware().GetDevice()
    return [dev for dev in devices
            if isinstance(dev, vim.vm.Device.VirtualDisk)]
def GetPyVimVM(self, vm):
    """Wrap the managed object *vm* in a pyVim.vm.VM helper, binding the
    session's property collector plus the VM's resource pool and config."""
    return pyVim.vm.VM(vm,
                       self.si.content.propertyCollector,
                       vm.resourcePool,
                       vm.GetConfig())
def DescribeVm(vm):
    """Return the datastore path of the VM's .vmx file as a description."""
    files = vm.GetConfig().GetFiles()
    return files.GetVmPathName()
def main(): supportedArgs = [(["h:", "host="], "localhost", "Host name of VC/host", "host"), (["u:", "user="******"root", "User name", "user"), (["p:", "pwd="], "ca$hc0w", "Password", "pwd")] supportedToggles = [(["usage", "help"], False, "Show usage information", "usage")] args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles) if args.GetKeyValue("usage") == True: Usage() sys.exit(0) # Connect si = Connect(host=args.GetKeyValue("host"), user=args.GetKeyValue("user"), pwd=args.GetKeyValue("pwd")) # Initialize the lun uuid list lunMap = {} # Get all vms vmList = invt.findVms("", "") if vmList == None: print "No virtual machines found in the inventory" sys.exit(0) # Walk the vm list, looking for rdms for vmInfo in vmList: vm = vmInfo[0] config = vm.GetConfig() # Skip invalid vms without information available. if config == None: print "Skipping an invalid virtual machine without any identifier" continue debugStmt = "DEBUG: Examining vm " + config.GetName() + " ..." devices = config.GetHardware().GetDevice() # Get list of rdms available on vm rdms = [ device.GetBacking() for device in devices \ if isinstance(device, Vim.Vm.Device.VirtualDisk) and \ isinstance(device.GetBacking(), \ Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo) ] # Add the listed rdms to a map if len(rdms) > 0: for rdm in rdms: lunid = rdm.GetLunUuid() currentList = [] if lunMap.has_key(lunid): # Ooh, a duplicate. currentList = lunMap[lunid] currentList.append(config.GetName()) lunMap[lunid] = currentList debugStmt += ",".join([rdm.GetDeviceName() for rdm in rdms]) #print debugStmt # Check out the results print "" conflicts = 0 for key, val in lunMap.iteritems(): if len(val) > 1: if conflicts == 0: print "LUNs used by more than 1 virtual machine: " print "------------------------------------------" conflicts += 1 print "" print "LUN ID : " + key print "Virtual machines: " + (", ").join(val) if conflicts == 0: print "No LUNs used by more than 1 virtual machine. 
System is clean" print "" print ""