def esxi_add_new_disk_process(vm, si, disk_size):
    """Add a new thin-provisioned virtual disk to *vm* and wait for completion.

    :param vm: vim.VirtualMachine to reconfigure
    :param si: service instance used to wait on the reconfigure task
    :param disk_size: capacity of the new disk, in GB
    """
    spec = vim.vm.ConfigSpec()
    # BUG FIX: both names below were previously unbound when the VM had no
    # existing disk / no SCSI controller, raising NameError later.
    unit_number = 0
    controller = None
    # Scan existing devices: pick the next free unit number and find the
    # SCSI controller the new disk will attach to.
    for dev in vm.config.hardware.device:
        if hasattr(dev.backing, 'fileName'):
            unit_number = int(dev.unitNumber) + 1
            # unit_number 7 is reserved for the SCSI controller itself
            if unit_number == 7:
                unit_number += 1
            if unit_number >= 16:
                print("we don't support this many disks")
                return
        if isinstance(dev, vim.vm.device.VirtualSCSIController):
            controller = dev
    if controller is None:
        # Nothing to attach the disk to; fail loudly instead of NameError.
        print("No SCSI controller found on VM; cannot add disk")
        return
    new_disk_kb = int(disk_size) * 1024 * 1024
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.fileOperation = "create"
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    disk_spec.device.backing.thinProvisioned = True
    disk_spec.device.backing.diskMode = 'persistent'
    disk_spec.device.unitNumber = unit_number
    disk_spec.device.capacityInKB = new_disk_kb
    disk_spec.device.controllerKey = controller.key
    spec.deviceChange = [disk_spec]
    task = vm.ReconfigVM_Task(spec=spec)
    tasks.wait_for_tasks(si, [task])
def esxi_add_new_nic_process(vm, si, network_name):
    """Attach a new E1000 network adapter bound to *network_name* to *vm*."""
    label_prefix = 'Network adapter '
    # Count existing adapters so the new one gets the next label number.
    adapter_total = 0
    for device in vm.config.hardware.device:
        if device.deviceInfo.label.find(label_prefix) >= 0:
            adapter_total += 1

    nic = vim.vm.device.VirtualE1000()
    nic.wakeOnLanEnabled = True
    nic.addressType = 'generated'
    nic.key = 4000
    nic.deviceInfo = vim.Description()
    nic.deviceInfo.label = label_prefix + str(adapter_total + 1)
    nic.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    nic.backing.network = get_obj(si.content, [vim.Network], network_name)
    nic.backing.deviceName = network_name
    nic.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic.connectable.startConnected = True
    nic.connectable.connected = True
    nic.connectable.allowGuestControl = True

    change = vim.vm.device.VirtualDeviceSpec()
    change.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    change.device = nic

    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [change]
    tasks.wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
def esxi_destory_vm(host, usr, pwd, vm_name):
    """Power off (if running) and destroy the VM named *vm_name* on an ESXi host.

    Returns None on success, an error string on failure.
    NOTE(review): name keeps the original 'destory' spelling for existing callers.

    :param host: ESXi/vCenter host to connect to
    :param usr: user name
    :param pwd: password
    :param vm_name: display name of the VM to destroy
    """
    try:
        si = connect.Connect(host=host, user=usr, pwd=pwd)
    except Exception as e:
        print(e)
        return "Destory VM(%s) failed!" % vm_name
    try:
        vm_view = si.content.viewManager.CreateContainerView(
            si.content.rootFolder, [vim.VirtualMachine], True)
        uuid = ""
        for vm in vm_view.view:
            if vm.summary.config.name == vm_name:
                uuid = vm.summary.config.instanceUuid
                break
        if uuid == "":
            return "Can't find VM(%s)!" % vm_name
        # Re-fetch by instance UUID so we operate on a fresh managed object.
        vm = si.content.searchIndex.FindByUuid(None, uuid, True, True)
        if vm.runtime.powerState == "poweredOn":
            task = vm.PowerOffVM_Task()
            tasks.wait_for_tasks(si, [task])
        task = vm.Destroy_Task()
        tasks.wait_for_tasks(si, [task])
    except Exception as e:
        print(e)
        return "Destory VM(%s) failed!" % vm_name
    finally:
        # BUG FIX: the original leaked the connection when an exception was
        # raised; always disconnect.
        connect.Disconnect(si)
def esxi_add_new_disk_process(vm, si, disk_size):
    """Add a new thin-provisioned virtual disk to *vm* and wait for completion.

    NOTE(review): this function is defined twice in this file with identical
    bodies — consider deleting one copy.

    :param vm: vim.VirtualMachine to reconfigure
    :param si: service instance used to wait on the reconfigure task
    :param disk_size: capacity of the new disk, in GB
    """
    spec = vim.vm.ConfigSpec()
    # BUG FIX: both names below were previously unbound when the VM had no
    # existing disk / no SCSI controller, raising NameError later.
    unit_number = 0
    controller = None
    # Scan existing devices: pick the next free unit number and find the
    # SCSI controller the new disk will attach to.
    for dev in vm.config.hardware.device:
        if hasattr(dev.backing, 'fileName'):
            unit_number = int(dev.unitNumber) + 1
            # unit_number 7 is reserved for the SCSI controller itself
            if unit_number == 7:
                unit_number += 1
            if unit_number >= 16:
                print("we don't support this many disks")
                return
        if isinstance(dev, vim.vm.device.VirtualSCSIController):
            controller = dev
    if controller is None:
        # Nothing to attach the disk to; fail loudly instead of NameError.
        print("No SCSI controller found on VM; cannot add disk")
        return
    new_disk_kb = int(disk_size) * 1024 * 1024
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.fileOperation = "create"
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    disk_spec.device.backing.thinProvisioned = True
    disk_spec.device.backing.diskMode = 'persistent'
    disk_spec.device.unitNumber = unit_number
    disk_spec.device.capacityInKB = new_disk_kb
    disk_spec.device.controllerKey = controller.key
    spec.deviceChange = [disk_spec]
    task = vm.ReconfigVM_Task(spec=spec)
    tasks.wait_for_tasks(si, [task])
def disc_change_mode(vm_name, disk_number, mode):
    """Change the disk mode (e.g. 'persistent', 'independent_persistent')
    of one virtual disk on the named VM.

    :param vm_name: display name of the VM
    :param disk_number: disk number N in the label "Hard disk N"
    :param mode: new backing diskMode string
    :return: True on success, False on error
    """
    try:
        vm = select_vm_name(vm_name)
        hdd_label = 'Hard disk ' + str(disk_number)
        virtual_hdd_device = None
        for dev in vm.config.hardware.device:
            if isinstance(dev, vim.vm.device.VirtualDisk) \
                    and dev.deviceInfo.label == hdd_label:
                virtual_hdd_device = dev
        if not virtual_hdd_device:
            # BUG FIX: the original referenced the undefined name 'disk_label'
            # here, turning the "not found" case into a NameError.
            raise RuntimeError(
                'Virtual {} could not be found.'.format(hdd_label))
        virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_hdd_spec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.edit
        virtual_hdd_spec.device = virtual_hdd_device
        virtual_hdd_spec.device.backing.diskMode = mode
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [virtual_hdd_spec]
        task = vm.ReconfigVM_Task(spec=spec)
        # NOTE(review): 'si' is not defined in this function; assumed to be a
        # module-level service instance — confirm.
        tasks.wait_for_tasks(si, [task])
        return True
    except Exception as err:
        print(err)
        return False
def esxi_add_new_nic_process(vm, si, network_name):
    """Add an E1000 NIC connected to *network_name* to *vm* and wait for it."""
    prefix = 'Network adapter '
    # Next label index = number of adapters already present + 1.
    count = len([d for d in vm.config.hardware.device
                 if d.deviceInfo.label.find(prefix) >= 0])

    adapter = vim.vm.device.VirtualE1000()
    adapter.wakeOnLanEnabled = True
    adapter.addressType = 'generated'
    adapter.key = 4000
    adapter.deviceInfo = vim.Description()
    adapter.deviceInfo.label = prefix + str(count + 1)

    backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    backing.network = get_obj(si.content, [vim.Network], network_name)
    backing.deviceName = network_name
    adapter.backing = backing

    connect_info = vim.vm.device.VirtualDevice.ConnectInfo()
    connect_info.startConnected = True
    connect_info.connected = True
    connect_info.allowGuestControl = True
    adapter.connectable = connect_info

    device_spec = vim.vm.device.VirtualDeviceSpec()
    device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    device_spec.device = adapter

    config = vim.vm.ConfigSpec()
    config.deviceChange = [device_spec]
    task = vm.ReconfigVM_Task(spec=config)
    tasks.wait_for_tasks(si, [task])
def vmCreateWait(si, taskList):
    """Block until every task in *taskList* completes, logging known faults.

    :param si: service instance the tasks belong to
    :param taskList: list of vim.Task objects to wait on
    """
    try:
        tasks.wait_for_tasks(si, taskList)
        # NOTE: the original rebound 'taskList = []' here; that only changed
        # the local name and never affected the caller's list, so it is
        # removed as dead code.
    except vim.fault.DuplicateName as e:
        print(e.msg)
    except Exception as e:
        print(e)
def create_vm_snapshot(self, vm, name=None, memory=False, quiesce=False):
    """Take a snapshot of *vm* and return the snapshot result object.

    :param vm: vim.VirtualMachine to snapshot
    :param name: snapshot name; defaults to "hunter-snapshot"
    :param memory: include a memory dump in the snapshot
    :param quiesce: quiesce the guest file system via VMware Tools
    """
    snapshot_name = name or "hunter-snapshot"
    snap_task = vm.CreateSnapshot_Task(
        name=snapshot_name, memory=memory, quiesce=quiesce)
    tasks.wait_for_tasks(self.si, [snap_task])
    return snap_task.info.result
def destroy_vm(self):
    """Destroy the wrapped VM, powering it off first when it is running."""
    try:
        if self.__vm.runtime.powerState == 'poweredOn':
            self.power_off_vm()
        wait_for_tasks(self.__si, tasks=[self.__vm.Destroy_Task()])
    except Exception as error:
        print(error.message)
def destroy(self):
    """Power off the slave VM if needed, then destroy it."""
    is_on = self.powered_on()
    logging.info("VM is powered on: %s", is_on)
    if is_on:
        self.power_off()
    logging.info("Destroying VM...")
    destroy_task = self._slave_vm.Destroy_Task()
    tasks.wait_for_tasks(self._esx_connection, [destroy_task])
    logging.info("Done destroying VM")
def revert_snapshot(self, snapshot_name):
    """Revert the VM to the first snapshot whose name matches *snapshot_name*.

    :param snapshot_name: name of the snapshot to revert to
    """
    try:
        self.get_snapshot_list()
        for snapshot in self.snapshot_list:
            if snapshot.name == snapshot_name:
                snapshot_obj = snapshot.snapshot
                task = [snapshot_obj.RevertToSnapshot_Task()]
                wait_for_tasks(self.__si, tasks=task)
                # BUG FIX: the original kept scanning after a match and would
                # revert repeatedly when snapshot names are duplicated.
                break
    except Exception as error:
        print(error.message)
def suspend_vm(self):
    """Suspend the wrapped VM and wait for the suspend task to finish."""
    try:
        wait_for_tasks(self.__si, tasks=[self.__vm.SuspendVM()])
    except vim.fault.InvalidPowerState:
        print('The power state is not poweredOn.')
    except vim.fault.TaskInProgress:
        print('The virtual machine is busy.')
def update_virtual_disk_capacity(vm_name, disk_number, new_capacity_in_gb):
    """Grow an existing virtual disk, or add a new one if it does not exist.

    :param vm_name: name of the virtual machine
    :param disk_number: disk number N in the label "Hard disk N"
    :param new_capacity_in_gb: new capacity in GB
    :return: True on success, a message string when no change is needed,
             False on error
    :raises RuntimeError: (caught below) when asked to shrink a disk
    """
    try:
        vm_obj = select_vm_name(vm_name)
        new_capacity_in_kb = gigabytes_to_kilobytes(long(new_capacity_in_gb))
        hard_disk_label = 'Hard disk ' + str(disk_number)
        virtual_disk_device = None
        for dev in vm_obj.config.hardware.device:
            if isinstance(dev, vim.vm.device.VirtualDisk) \
                    and dev.deviceInfo.label == hard_disk_label:
                virtual_disk_device = dev
        if virtual_disk_device:
            # Prefer capacityInBytes when populated; fall back to KB field.
            old_capacity_in_gb = \
                bytes_to_gigabytes(virtual_disk_device.capacityInBytes) \
                if virtual_disk_device.capacityInBytes else \
                kilobytes_to_gigabytes(virtual_disk_device.capacityInKB)
            if new_capacity_in_gb > old_capacity_in_gb:
                # Edit the existing disk in place, preserving its backing.
                disk_spec = vim.vm.device.VirtualDeviceSpec()
                disk_spec.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.edit
                disk_spec.device = vim.vm.device.VirtualDisk()
                disk_spec.device.key = virtual_disk_device.key
                disk_spec.device.backing = virtual_disk_device.backing
                disk_spec.device.backing.fileName = \
                    virtual_disk_device.backing.fileName
                disk_spec.device.backing.diskMode = \
                    virtual_disk_device.backing.diskMode
                disk_spec.device.controllerKey = \
                    virtual_disk_device.controllerKey
                disk_spec.device.unitNumber = virtual_disk_device.unitNumber
                disk_spec.device.capacityInKB = long(new_capacity_in_kb)
            elif new_capacity_in_gb == old_capacity_in_gb:
                # BUG FIX: the original compared new_capacity_in_gb with
                # itself (always true), so the shrink error below was
                # unreachable and shrink requests were silently ignored.
                return 'Disk capacity is the same. No change need to be done.'
            else:
                raise RuntimeError(
                    'Reducing Virtual Hard Disk Size is not supported '
                    'at this time.')
        else:
            # Disk not found: create a new one after the last existing disk.
            disks = [dev for dev in vm_obj.config.hardware.device
                     if isinstance(dev, vim.vm.device.VirtualDisk)]
            next_unit_number = int(disks[-1].unitNumber) + 1
            current_controller_key = int(disks[-1].controllerKey)
            disk_spec = create_virtual_disk(new_capacity_in_kb,
                                            current_controller_key,
                                            next_unit_number,
                                            in_bytes=False)
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [disk_spec]
        task = vm_obj.ReconfigVM_Task(spec=spec)
        # NOTE(review): 'si' is not defined in this function; assumed to be a
        # module-level service instance — confirm.
        tasks.wait_for_tasks(si, [task])
        return True
    except Exception as err:
        print(err)
        return False
def reboot_guest(self):
    """Ask the guest OS to reboot (via VMware Tools) and wait for the task."""
    try:
        wait_for_tasks(self.__si, tasks=[self.__vm.RebootGuest()])
    except vim.fault.InvalidPowerState:
        print('The power state is not poweredOn.')
    except vim.fault.TaskInProgress:
        print('The virtual machine is busy.')
    except Exception as error:
        print(error.message)
def power_off_vm(self):
    """Hard power-off the wrapped VM.

    :return: 0 on success, the error message on an unexpected failure,
             None when a known power-state/busy fault is printed instead.
    """
    try:
        wait_for_tasks(self.__si, tasks=[self.__vm.PowerOff()])
    except vim.fault.InvalidPowerState:
        print('The power state is not poweredOn.')
    except vim.fault.TaskInProgress:
        print('The virtual machine is busy.')
    except Exception as error:
        return error.message
    else:
        return 0
def create_clone(self, vm, new_vm=str, host=None, poweron=True):
    """Clone *vm* into a new VM named *new_vm* and return the clone result.

    NOTE(review): the 'new_vm=str' default looks accidental (the str type
    object, not a name) — callers should always pass a name; confirm.

    :param vm: source vim.VirtualMachine
    :param new_vm: name for the clone
    :param host: target host; defaults to the source VM's current host
    :param poweron: power on the clone after creation
    """
    target_host = host if host else vm.summary.runtime.host
    relocate = vim.vm.RelocateSpec()
    relocate.diskMoveType = 'createNewChildDiskBacking'
    relocate.host = target_host
    relocate.pool = target_host.parent.resourcePool
    spec = vim.vm.CloneSpec()
    spec.powerOn = poweron
    spec.location = relocate
    vm_folder = target_host.parent.parent.parent.vmFolder
    clone_task = vm.CloneVM_Task(vm_folder, new_vm, spec)
    tasks.wait_for_tasks(self.si, [clone_task])
    return clone_task.info.result
def remove_all_snapshots(self, consolidate=None):
    """
    Remove all the snapshots associated with this virtual machine.
    If the virtual machine does not have any snapshots, then this
    operation simply return successfully.
    :param consolidate: optional flag forwarded to RemoveAllSnapshots
    :return:
    """
    try:
        # BUG FIX: the original used truthiness here, so an explicit
        # consolidate=False was silently dropped and the server default used.
        if consolidate is not None:
            task = [self.__vm.RemoveAllSnapshots(consolidate)]
        else:
            task = [self.__vm.RemoveAllSnapshots()]
        wait_for_tasks(self.__si, tasks=task)
    except vim.fault.SnapshotFault:
        print('There is an error occurs during the snapshot operation.')
def esxi_reset_vm(host, usr, pwd, vm_name):
    """Hard-reset the VM named *vm_name* on an ESXi host.

    Returns None on success, an error string on failure.

    :param host: ESXi/vCenter host to connect to
    :param usr: user name
    :param pwd: password
    :param vm_name: display name of the VM to reset
    """
    try:
        si = connect.Connect(host=host, user=usr, pwd=pwd)
    except Exception as e:
        print(e)
        return "Reset VM(%s) failed!" % vm_name
    try:
        vm_view = si.content.viewManager.CreateContainerView(
            si.content.rootFolder, [vim.VirtualMachine], True)
        uuid = ""
        for vm in vm_view.view:
            if vm.summary.config.name == vm_name:
                uuid = vm.summary.config.instanceUuid
                break
        if uuid == "":
            return "Can't find VM(%s)!" % vm_name
        vm = si.content.searchIndex.FindByUuid(None, uuid, True, True)
        task = vm.ResetVM_Task()
        tasks.wait_for_tasks(si, [task])
    except Exception as e:
        # BUG FIX: the original swallowed the exception without logging and
        # leaked the connection on the error path.
        print(e)
        return "Reset VM(%s) failed!" % vm_name
    finally:
        connect.Disconnect(si)
def create_snapshot(self, snapshot_name, description, memory=True,
                    quiesce=False):
    """
    Creates a new snapshot of this virtual machine.

    :param snapshot_name: The name for this snapshot.
    :param description: A description for this snapshot.
    :param memory: If True, a dump of the internal state of the virtual
        machine is included in the snapshot. Memory snapshots consume time
        and resources, and thus take longer to create. When set to False,
        the power state of the snapshot is set to powered off.
    :param quiesce: If True and the virtual machine is powered on when the
        snapshot is taken, VMware Tools is used to quiesce the file system
        in the virtual machine, so the disk snapshot represents a
        consistent state of the guest file system. Ignored when the VM is
        powered off or VMware Tools are not available.
    :return:
    """
    try:
        snap_task = self.__vm.CreateSnapshot_Task(
            name=snapshot_name, description=description,
            memory=memory, quiesce=quiesce)
        wait_for_tasks(self.__si, tasks=[snap_task])
    except Exception as error:
        print(error.message)
def create_linkedclone(self, vm, new_vm=str, host=None, poweron=True):
    """Create a linked clone of *vm* based on its deepest snapshot.

    A snapshot is created first when the VM has none; the clone shares disk
    backing with that snapshot ('createNewChildDiskBacking').

    :param vm: source vim.VirtualMachine
    :param new_vm: name for the clone
    :param host: target host; defaults to the source VM's current host
    :param poweron: power on the clone after creation
    """
    # Ensure there is at least one snapshot to link against.
    if not len(vm.rootSnapshot):
        self.create_vm_snapshot(vm)
    # Walk down the first snapshot chain to the newest (deepest) snapshot.
    snap = vm.rootSnapshot[0]
    while len(snap.childSnapshot):
        snap = snap.childSnapshot[0]

    target_host = host if host else vm.summary.runtime.host
    relocate = vim.vm.RelocateSpec()
    relocate.diskMoveType = 'createNewChildDiskBacking'
    relocate.host = target_host
    relocate.pool = target_host.parent.resourcePool

    spec = vim.vm.CloneSpec()
    spec.powerOn = poweron
    spec.location = relocate
    spec.snapshot = snap

    vm_folder = target_host.parent.parent.parent.vmFolder
    clone_task = vm.CloneVM_Task(vm_folder, new_vm, spec)
    tasks.wait_for_tasks(self.si, [clone_task])
    return clone_task.info.result
def esxi_change_memory(host, usr, pwd, vm_name, mem_size):
    """Reconfigure the memory size of the VM named *vm_name*.

    Returns None on success, an error string on failure.
    NOTE(review): the configured size is mem_size + 512 MB, preserved from
    the original — confirm the extra 512 MB is intentional.

    :param mem_size: base memory size in MB
    """
    try:
        si = connect.Connect(host=host, user=usr, pwd=pwd)
    except Exception as e:
        print(e)
        return "Change VM Memory on VM(%s) failed!" % vm_name
    try:
        vm_view = si.content.viewManager.CreateContainerView(
            si.content.rootFolder, [vim.VirtualMachine], True)
        uuid = ""
        for vm in vm_view.view:
            if vm.summary.config.name == vm_name:
                uuid = vm.summary.config.instanceUuid
                break
        if uuid == "":
            return "Can't find VM(%s)!" % vm_name
        vm = si.content.searchIndex.FindByUuid(None, uuid, True, True)
        spec = vim.vm.ConfigSpec()
        spec.memoryMB = int(mem_size) + 512
        task = vm.ReconfigVM_Task(spec)
        tasks.wait_for_tasks(si, [task])
    except Exception as e:
        # BUG FIX: the original swallowed the exception silently and leaked
        # the connection on the error path.
        print(e)
        return "Change VM Memory on VM(%s) failed!" % vm_name
    finally:
        connect.Disconnect(si)
def esxi_change_memory(host, usr, pwd, vm_name, mem_size):
    """Reconfigure the memory size of the VM named *vm_name*.

    Returns None on success, an error string on failure.
    NOTE(review): this function is defined twice in this file with identical
    bodies — consider deleting one copy. The configured size is
    mem_size + 512 MB, preserved from the original — confirm the extra
    512 MB is intentional.

    :param mem_size: base memory size in MB
    """
    try:
        si = connect.Connect(host=host, user=usr, pwd=pwd)
    except Exception as e:
        print(e)
        return "Change VM Memory on VM(%s) failed!" % vm_name
    try:
        vm_view = si.content.viewManager.CreateContainerView(
            si.content.rootFolder, [vim.VirtualMachine], True)
        uuid = ""
        for vm in vm_view.view:
            if vm.summary.config.name == vm_name:
                uuid = vm.summary.config.instanceUuid
                break
        if uuid == "":
            return "Can't find VM(%s)!" % vm_name
        vm = si.content.searchIndex.FindByUuid(None, uuid, True, True)
        spec = vim.vm.ConfigSpec()
        spec.memoryMB = int(mem_size) + 512
        task = vm.ReconfigVM_Task(spec)
        tasks.wait_for_tasks(si, [task])
    except Exception as e:
        # BUG FIX: the original swallowed the exception silently and leaked
        # the connection on the error path.
        print(e)
        return "Change VM Memory on VM(%s) failed!" % vm_name
    finally:
        connect.Disconnect(si)
def remove_disc(vm_name, disk_number):
    """Remove one virtual disk from the named VM.

    :param vm_name: display name of the VM
    :param disk_number: disk number N in the label "Hard disk N"
    :return: True on success, False on error
    """
    try:
        vm = select_vm_name(vm_name)
        hdd_label = 'Hard disk ' + str(disk_number)
        virtual_hdd_device = None
        for dev in vm.config.hardware.device:
            if isinstance(dev, vim.vm.device.VirtualDisk) \
                    and dev.deviceInfo.label == hdd_label:
                virtual_hdd_device = dev
        if not virtual_hdd_device:
            # BUG FIX: the original sent device=None to ReconfigVM_Task when
            # the disk was missing; fail with a clear message instead
            # (same pattern as disc_change_mode).
            raise RuntimeError(
                'Virtual {} could not be found.'.format(hdd_label))
        virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_hdd_spec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.remove
        virtual_hdd_spec.device = virtual_hdd_device
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [virtual_hdd_spec]
        task = vm.ReconfigVM_Task(spec=spec)
        # NOTE(review): 'si' is not defined in this function; assumed to be a
        # module-level service instance — confirm.
        tasks.wait_for_tasks(si, [task])
        return True
    except Exception as err:
        print(err)
        return False
def clone_vm_from_template(self, template_name, vm_name, folder_name=None,
                           host_name=None, ds_name=None, rp_name=None):
    """Clone a template into a new, powered-off VM.

    NOTE(review): the fall-back defaults ('Test', '10.224.104.31',
    'local2-2') are environment-specific values preserved from the
    original — confirm they are still valid for this deployment.

    :param template_name: name of the source template
    :param vm_name: name for the new VM
    :param folder_name: target folder name (default 'Test')
    :param host_name: host used to pick the default resource pool
    :param ds_name: target datastore name (default 'local2-2')
    :param rp_name: explicit resource pool name; when omitted, the host's
        root 'Resources' pool is used
    :raises SystemError: when any required inventory object is not found
    """
    # Idiom fix: 'is None' instead of '== None' for default checks.
    if folder_name is None:
        folder_name = 'Test'
    if host_name is None:
        host_name = '10.224.104.31'
    if ds_name is None:
        ds_name = 'local2-2'

    def _first_by_name(objects, wanted):
        # Return the first managed object whose .name matches, else None.
        for obj in objects:
            if obj.name == wanted:
                return obj
        return None

    template = _first_by_name(self.__get_all_templates(), template_name)
    if not template:
        raise SystemError('Unable to get template with supplied info.')

    folder = _first_by_name(
        self.__get_all_objects_by_type(vim_type=[vim.Folder]), folder_name)
    if not folder:
        raise SystemError('Unable to get folder with supplied info.')

    resource_pool = None
    resource_pools = self.__get_all_objects_by_type(
        vim_type=[vim.ResourcePool])
    if rp_name:
        resource_pool = _first_by_name(resource_pools, rp_name)
    else:
        # No explicit pool requested: use the host's root 'Resources' pool.
        for rp in resource_pools:
            if rp.owner.name == host_name and rp.name == 'Resources':
                resource_pool = rp
                break
    if not resource_pool:
        raise SystemError(
            'Unable to get resource pool with supplied info.')

    datastore = _first_by_name(
        self.__get_all_objects_by_type(vim_type=[vim.Datastore]), ds_name)
    if not datastore:
        raise SystemError('Unable to get to datastore with supplied info.')

    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.datastore = datastore
    relocate_spec.pool = resource_pool
    clone_spec = vim.vm.CloneSpec()
    clone_spec.location = relocate_spec
    clone_spec.powerOn = False
    task = [template.Clone(folder=folder, name=vm_name, spec=clone_spec)]
    wait_for_tasks(self.__si, task)
def power_off(self):
    """Power off the slave VM and block until the task completes."""
    logging.info("Powering off VM...")
    off_task = self._slave_vm.PowerOffVM_Task()
    logging.debug("Power off task started")
    tasks.wait_for_tasks(self._esx_connection, [off_task])
    logging.info("Done Powering off VM")