Example #1
    def insert_usb_device(self, vm, descriptor):
        """Insert a USB device into a VM.

        The device(s) are selected based on the descriptor string. This is
        typically of the form:

            path:1/6/2 version:2

        This example corresponds to a USB 2.0 device plugged into port 1 of
        the hub which itself is plugged into port 6 of the root hub on USB
        controller 1. This string can be obtained from
        `Connection.list_usb_devices_on_host`, by inspecting the
        `physicalPath` property of the returned devices.

        :param vm: the VM to insert the USB device into (see `get_vm`, or pass
            the VM name)
        :param descriptor: USB device descriptor string
        """
        if isinstance(vm, six.string_types):
            vm = self.get_vm(vm)
        cfg = vim.VirtualDeviceConfigSpec()
        cfg.operation = vim.VirtualDeviceConfigSpecOperation.add
        cfg.device = vim.VirtualUSB()
        cfg.device.key = -100
        cfg.device.backing = vim.VirtualUSBUSBBackingInfo()
        cfg.device.backing.deviceName = descriptor
        cfg.device.connectable = vim.VirtualDeviceConnectInfo()
        cfg.device.connectable.startConnected = True
        cfg.device.connectable.allowGuestControl = False
        cfg.device.connectable.connected = True
        cfg.device.connected = True
        spec = vim.VirtualMachineConfigSpec()
        spec.deviceChange = [cfg]
        self.wait_for_tasks([vm.ReconfigVM_Task(spec=spec)])
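A hedged usage sketch for the method above; `conn` (an instance of the class these methods live on) and the exact shape of the `list_usb_devices_on_host` results are assumptions based on the docstring:

# Pick the first USB 2.0 device the host reports and plug it into a VM by name.
for dev in conn.list_usb_devices_on_host():
    if "version:2" in dev.physicalPath:
        conn.insert_usb_device("test-vm", dev.physicalPath)
        break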
Example #2
    def remove_usb_device(self, vm, descriptor):
        """Remove a USB device from a VM.

        Removes a device by descriptor (see `insert_usb_device`); the matching
        device key (see the `key` property of the devices returned by
        `list_usb_devices_on_guest`) is looked up internally.

        :param vm: the VM to remove the USB device from (see `get_vm`, or pass
            the VM name)
        :param descriptor: USB device descriptor string
        """
        if isinstance(vm, six.string_types):
            vm = self.get_vm(vm)

        key = None
        for dev in self.list_usb_devices_on_guest(vm):
            if dev.backing.deviceName == descriptor:
                key = dev.key
                break
        else:
            raise ValueError("Descriptor not found in VM")

        cfg = vim.VirtualDeviceConfigSpec()
        cfg.operation = vim.VirtualDeviceConfigSpecOperation.remove
        cfg.device = vim.VirtualUSB()
        cfg.device.key = key
        cfg.device.backing = vim.VirtualUSBUSBBackingInfo()
        cfg.device.backing.deviceName = descriptor
        spec = vim.VirtualMachineConfigSpec()
        spec.deviceChange = [cfg]
        self.wait_for_tasks([vm.ReconfigVM_Task(spec=spec)])
Example #3

def deviceConfigSpecRemoveNIC(vmachine, network_device_key):
    # It's possible to remove a NIC based on other fields (key, deviceInfo.label);
    # however, VMs commonly have a 1:1 NIC:network relationship, so the NIC can
    # be found by passing the key of the device attached to that network.
    if vmachine.runtime.powerState == "poweredOn":
        print("The virtual machine should be powered off before removing a NIC")
        return None

    config_spec_operation = vim.VirtualDeviceConfigSpecOperation('remove')
    device_list = [
        device for device in vmachine.config.hardware.device
        if device.key == int(network_device_key)
    ]

    if len(device_list) == 1:
        device_obj = device_list[0]
        print("Removing NIC '" + device_obj.deviceInfo.label + "' (key = " +
              str(network_device_key) + ") ...")
        devspec = vim.VirtualDeviceConfigSpec(
            operation=config_spec_operation, device=device_obj)
        return devspec
    else:
        print("Failed - no device with key " + str(network_device_key) + " found")
        return None
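A minimal sketch of how the returned spec might be applied (`vmachine` and the device key are placeholders; `WaitForTask` is pyVim's task helper):

from pyVim.task import WaitForTask

devspec = deviceConfigSpecRemoveNIC(vmachine, network_device_key="4000")
if devspec is not None:
    # Wrap the single device change in a VM config spec and reconfigure.
    WaitForTask(vmachine.ReconfigVM_Task(
        spec=vim.vm.ConfigSpec(deviceChange=[devspec])))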
Example #4
    def _add_pci_device_to_vm(self, vm_obj, pci_Passthrough_device_obj, pci_id):
        changed = False
        failed = False
        vm_current_pci_devices = self._get_the_pci_devices_in_the_vm(vm_obj)
        if self.params['force'] or pci_id not in vm_current_pci_devices:
            # hex device ID without the '0x' prefix (lstrip('0x') can strip too much)
            deviceid = format(pci_Passthrough_device_obj.pciDevice.deviceId % 2**16, 'x')
            systemid = pci_Passthrough_device_obj.systemId
            backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceid,
                                                                 id=pci_id,
                                                                 systemId=systemid,
                                                                 vendorId=pci_Passthrough_device_obj.pciDevice.vendorId,
                                                                 deviceName=pci_Passthrough_device_obj.pciDevice.deviceName)
            hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
            new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
            new_device_config.operation = "add"
            vmConfigSpec = vim.vm.ConfigSpec()
            vmConfigSpec.deviceChange = [new_device_config]
            vmConfigSpec.memoryReservationLockedToMax = True

            try:
                task = vm_obj.ReconfigVM_Task(spec=vmConfigSpec)
                wait_for_task(task)
                changed = True
            except Exception as exc:
                failed = True
                self.module.fail_json(msg="Failed to add Pci device"
                                          " '{}' to vm {}.".format(pci_id, vm_obj.name),
                                          detail=exc.msg)
        else:
            return changed, failed
        return changed, failed
Example #5
def add_pvscsi_controller(vm, controllers, max_scsi_controllers, offset_from_bus_number):
    ''' 
    Add a new PVSCSI controller, return (controller_key, err) pair
    '''
    # find empty bus slot for the controller:
    taken = set([c.busNumber for c in controllers])
    avail = set(range(0, max_scsi_controllers)) - taken

    key = avail.pop()  # bus slot (callers guarantee a free slot exists)
    controller_key = key + offset_from_bus_number
    controller_spec = vim.VirtualDeviceConfigSpec(
        operation='add',
        device=vim.ParaVirtualSCSIController(key=controller_key,
                                                busNumber=key,
                                                sharedBus='noSharing', ), )
    # collect the device changes into the config spec
    pvscsi_change = [controller_spec]
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = pvscsi_change

    try:
        wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
        msg = "Failed to add PVSCSI Controller: %s" % ex.msg
        return None, err(msg)
    logging.debug("Added a PVSCSI controller, controller_id=%d", controller_key)
    return controller_key, None
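The err helper used above is not part of this snippet. A plausible stand-in, assuming callers only need an error object that evaluates truthy and carries the message (the real codebase may return something richer):

def err(msg):
    # Hypothetical stand-in: wrap an error message in the structure
    # that callers of add_pvscsi_controller() test against.
    return {u'Error': msg}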
Example #6
    def add_pci_device(self, vm, pci_device):
        """
        Attaches PCI device to VM

        Args:
            vm (vim.VirtualMachine): VM instance
            pci_device (vim.vm.PciPassthroughInfo): PCI device to add

        """
        host = vm.runtime.host.name
        logger.info(
            f"Adding PCI device with ID:{pci_device.pciDevice.id} on host {host} to {vm.name}"
        )
        # hex device ID without the '0x' prefix (lstrip("0x") can strip too much)
        deviceId = format(pci_device.pciDevice.deviceId % 2**16, "x")
        backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
            deviceId=deviceId,
            id=pci_device.pciDevice.id,
            systemId=pci_device.systemId,
            vendorId=pci_device.pciDevice.vendorId,
            deviceName=pci_device.pciDevice.deviceName,
        )

        hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
        new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
        new_device_config.operation = "add"

        vmConfigSpec = vim.vm.ConfigSpec()
        vmConfigSpec.memoryReservationLockedToMax = True
        vmConfigSpec.deviceChange = [new_device_config]
        WaitForTask(vm.ReconfigVM_Task(spec=vmConfigSpec))
Example #7
    def config_network(self, uuid, **kwargs):
        self.__logger.debug('config_network')
        self.__check_connection()
        for i in range(settings.app['vsphere']['retries']['config_network']):
            try:

                vm = self.content.searchIndex.FindByUuid(None, uuid, True)

                for device in vm.config.hardware.device:
                    if isinstance(device, (vim.vm.device.VirtualE1000,
                                           vim.vm.device.VirtualE1000e,
                                           vim.vm.device.VirtualPCNet32,
                                           vim.vm.device.VirtualVmxnet,
                                           vim.vm.device.VirtualVmxnet2,
                                           vim.vm.device.VirtualVmxnet3)):
                        device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(
                            deviceName=kwargs['interface_name'])

                        device_config_spec = vim.VirtualDeviceConfigSpec(
                            operation=vim.VirtualDeviceConfigSpecOperation(
                                'edit'),
                            device=device)

                        machine_config_spec = vim.vm.ConfigSpec(
                            deviceChange=[device_config_spec])
                        task = vm.ReconfigVM_Task(spec=machine_config_spec)
                        self.wait_for_task(task)
                break
            except Exception:
                self.__sleep_between_tries()
Example #8
    def add_pci(self, pci, host_obj, vm_update, vm_status, mmio_size):
        """ Add a PCI device for a VM.
            If a PCI device has large BARs, it requires 64bit MMIO
            support and large enough MMIO mapping space. This method will add
            these two configurations by default and check uEFI installation.
            But haven't evaluated the impacts of
            adding these configurations for a PCI device which doesn't have
            large BARs. For more details, check the reference KB article.

        Args:
            pci (str): pci ID of the PCI device
            host_obj (vim.HostSystem): Host obj to locate the PCI device
            vm_update (ConfigVM): VM update obj
            vm_status (GetVM): VM status obj
            mmio_size (int): 64-bit MMIO space in GB

        Returns:
            list: a list of Task objects

        References:
            https://kb.vmware.com/s/article/2142307

        """

        self.logger.info("Adding PCI device {0} for {1}".format(
            pci, self.vm_obj.name))
        extra_config_key1 = "pciPassthru.64bitMMIOSizeGB"
        extra_config_key2 = "pciPassthru.use64bitMMIO"
        if mmio_size is None:
            mmio_size = 256
        tasks = []
        pci_obj = GetHost(host_obj).pci_obj(pci)
        # Convert the device ID to a hex string without the '0x' prefix
        device_id = format(pci_obj.deviceId % 2**16, "x")
        if not vm_status.uefi():
            self.logger.warning("VM {0} is not installed with UEFI. "
                                "If PCI device has large BARs, "
                                "UEFI installation is required.".format(
                                    self.vm_obj.name))
        else:
            self.logger.info("Good. VM {0} has UEFI "
                             "installation.".format(self.vm_obj.name))
        sys_id = vm_status.pci_id_sys_id_passthru()
        backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
            deviceId=device_id,
            id=pci_obj.id,
            systemId=sys_id[pci_obj.id],
            vendorId=pci_obj.vendorId,
            deviceName=pci_obj.deviceName,
        )
        backing_obj = vim.VirtualPCIPassthrough(backing=backing)
        dev_config_spec = vim.VirtualDeviceConfigSpec(device=backing_obj)
        dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [dev_config_spec]
        tasks.append(self.vm_obj.ReconfigVM_Task(spec=config_spec))
        tasks.append(vm_update.add_extra(extra_config_key1, str(mmio_size)))
        tasks.append(vm_update.add_extra(extra_config_key2, "TRUE"))
        return tasks
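vm_update.add_extra is not shown here; a minimal sketch of how an extraConfig key/value pair is applied in pyVmomi (assuming a plain ReconfigVM_Task is acceptable for this purpose):

def add_extra_config(vm_obj, key, value):
    # Set a single extraConfig option (e.g. "pciPassthru.use64bitMMIO" = "TRUE")
    # on a VM and return the reconfigure task.
    opt = vim.option.OptionValue(key=key, value=value)
    spec = vim.vm.ConfigSpec(extraConfig=[opt])
    return vm_obj.ReconfigVM_Task(spec=spec)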
Example #9
    def attach_sp_to_vm(self):
        profiles = self.spbmclient.get_profiles()
        if not profiles:
            self.module.fail_json(
                changed=False,
                msg="Could not retrieve Storage Profile information from vCenter")
        else:
            for profile in profiles:
                if self.module.params['profilename'] == profile.name:
                    self.profile = profile
                    break
            if self.profile is None:
                self.module.fail_json(
                    changed=False,
                    msg="Did not find Storage Profile {} in vCenter".format(
                        self.profilename))
            vm = find_vm_by_name(content=self.content, vm_name=self.vmname)
            if vm is None:
                self.module.fail_json(
                    changed=False,
                    msg="Did not find VM {} in vCenter".format(self.vmname))
            # To attach a storage profile to a VM, the profile must be attached
            # to VM Home and to each individual disk.
            disks = [
                device for device in vm.config.hardware.device
                if isinstance(device, vim.vm.device.VirtualDisk)
            ]
            device_change = []
            for disk in disks:
                profile = vim.VirtualMachineDefinedProfileSpec(
                    profileId=self.profile.profileId.uniqueId)
                device_change.append(
                    vim.VirtualDeviceConfigSpec(
                        device=disk,
                        operation=vim.VirtualDeviceConfigSpecOperation.edit,
                        profile=[profile]))
            profile = vim.VirtualMachineDefinedProfileSpec(
                profileId=self.profile.profileId.uniqueId)
            vmSpec = vim.VirtualMachineConfigSpec(vmProfile=[profile],
                                                  deviceChange=device_change)
            state, error = wait_for_task(vm.ReconfigVM_Task(spec=vmSpec))
            if state:
                self.module.exit_json(
                    changed=True,
                    msg="Attached Profile {} to VM {}".format(
                        self.profilename, self.vmname))
            else:
                self.module.fail_json(
                    changed=False,
                    msg="Failed to attach Profile {} to VM {}. Error {}".format(
                        self.profilename, self.vmname, error))
Example #10
    def reset_sp_vm(self):
        profiles = self.spbmclient.get_profiles()
        if not profiles:
            self.module.fail_json(
                changed=False,
                msg="Could not retrieve Storage Profile information from vCenter")
        else:
            vm = find_vm_by_name(content=self.content, vm_name=self.vmname)
            if vm is None:
                self.module.fail_json(
                    changed=False,
                    msg="Did not find VM {} in vCenter".format(self.vmname))

            # Find Where VM Home is Residing
            vmPathName = vm.config.files.vmPathName
            vmx_datastore = vmPathName.partition(']')[0].replace('[', '')
            vmx_profile = self.spbmclient.get_ds_default_profile(
                find_datastore_by_name(content=self.content,
                                       datastore_name=vmx_datastore))

            disks = [
                device for device in vm.config.hardware.device
                if isinstance(device, vim.vm.device.VirtualDisk)
            ]
            device_change = []
            for disk in disks:
                datastore_profile = self.spbmclient.get_ds_default_profile(
                    disk.backing.datastore)
                profile = vim.VirtualMachineDefinedProfileSpec(
                    profileId=datastore_profile.uniqueId)
                device_change.append(
                    vim.VirtualDeviceConfigSpec(
                        device=disk,
                        operation=vim.VirtualDeviceConfigSpecOperation.edit,
                        profile=[profile]))
            profile = vim.VirtualMachineDefinedProfileSpec(
                profileId=vmx_profile.uniqueId)
            vmSpec = vim.VirtualMachineConfigSpec(vmProfile=[profile],
                                                  deviceChange=device_change)
            state, error = wait_for_task(vm.ReconfigVM_Task(spec=vmSpec))
            if state:
                self.module.exit_json(
                    changed=True,
                    msg="Detached Profile {} from VM {}".format(
                        self.profilename, self.vmname))
            else:
                self.module.fail_json(
                    changed=False,
                    msg="Failed to Detach Profile from VM {}. Error {}".format(
                        self.vmname, error))
Example #11
def main():
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()
    si = SmartConnect(host="[VCSA/Host NAME or IP HERE]",
                      user="******",
                      pwd="[PASSWORD HERE]",
                      port=443,
                      sslContext=context)
    if not si:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1

    atexit.register(Disconnect, si)

    ###################################################
    # New Content
    ###################################################

    HostContent = si.content

    vm = None
    vGPUobj = None
    TempVMlist = HostContent.viewManager.CreateContainerView(
        HostContent.rootFolder, [vim.VirtualMachine], True)
    for managed_VM_ref in TempVMlist.view:  # go through the VM list
        if managed_VM_ref.name == "Compute000":  # find the desired VM
            print(managed_VM_ref)
            print(managed_VM_ref.name)
            vm = managed_VM_ref  # capture the VM object for use below
    if vm is not None:  # safety check so we don't reconfigure a null object
        for VMVirtDevice in vm.config.hardware.device:  # find the vGPU among the vPCI devices
            if isinstance(VMVirtDevice, vim.VirtualPCIPassthrough) and \
                    hasattr(VMVirtDevice.backing, "vgpu"):
                if VMVirtDevice.backing.vgpu == "grid_p4-4q":
                    vGPUobj = VMVirtDevice
                    print("Found vGPU: " + VMVirtDevice.backing.vgpu)

        if vGPUobj is not None:  # only reconfigure if the vGPU was actually found
            cspec = vim.vm.ConfigSpec()
            cspec.deviceChange = [vim.VirtualDeviceConfigSpec()]
            cspec.deviceChange[0].operation = 'remove'
            cspec.deviceChange[0].device = vGPUobj
            WaitForTask(vm.Reconfigure(cspec))
            print("Removed vGPU")


###################################################
# End New Content
###################################################

    return 0
Example #12

def deviceConfigSpecAddNIC(vmachine, network):
    if vmachine.runtime.powerState == "poweredOn":
        print("For adding the " + network + " network, the virtual machine should be powered off")
        return None
    config_spec_operation = vim.VirtualDeviceConfigSpecOperation('add')
    backing_info = vim.VirtualEthernetCardNetworkBackingInfo(
        deviceName=network)
    device = vim.VirtualVmxnet3(key=-1, backing=backing_info)
    print("Adding NIC and attaching to network '" + network + "' ...")
    devspec = vim.VirtualDeviceConfigSpec(operation=config_spec_operation,
                                          device=device)
    return devspec
Example #13
def main():
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()
    si = SmartConnect(host="[VCSA/Host NAME or IP HERE]",
                      user="******",
                      pwd="[PASSWORD HERE]",
                      port=443,
                      sslContext=context)
    if not si:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1

    atexit.register(Disconnect, si)

    ###################################################
    # New Content
    ###################################################

    HostContent = si.content

    vm = None
    TempVMlist = HostContent.viewManager.CreateContainerView(
        HostContent.rootFolder, [vim.VirtualMachine], True)
    for managed_VM_ref in TempVMlist.view:  # go through the VM list
        if managed_VM_ref.name == "Compute000":  # find the desired VM
            print(managed_VM_ref)
            print(managed_VM_ref.name)
            vm = managed_VM_ref  # capture the VM object for use below
    if vm is not None:  # safety check so we don't reconfigure a null object
        cspec = vim.vm.ConfigSpec()
        cspec.deviceChange = [vim.VirtualDeviceConfigSpec()]
        cspec.deviceChange[0].operation = 'add'
        cspec.deviceChange[0].device = vim.VirtualPCIPassthrough()
        cspec.deviceChange[0].device.deviceInfo = vim.Description()
        cspec.deviceChange[0].device.deviceInfo.summary = 'NVIDIA GRID vGPU grid_p4-4q'
        cspec.deviceChange[0].device.deviceInfo.label = 'New PCI device'
        cspec.deviceChange[0].device.backing = \
            vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu='grid_p4-4q')
        #cspec.deviceChange[0].device.backing.vgpu =str('grid_p4-2q')
        WaitForTask(vm.Reconfigure(cspec))


###################################################
# End New Content
###################################################

    return 0
Example #14

def add_pci_nics(args, vm):
    pci_id_list = args.pci_nics.rstrip(',')
    pci_id_list = pci_id_list.split(',')
    pci_id_list.sort()
    if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
        print("VM:%s is powered ON. Cannot do hot pci add now. Shutting it down" % (
            args.vm_name))
        poweroffvm(vm)
    for pci_id in pci_id_list:
        found = False
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualPCIPassthrough) \
                    and device.backing.id == pci_id:
                print("pci_device already present! Not adding the pci device.")
                found = True
                break
        if found:
            continue
        # Look the device up among the host's passthrough-capable PCI devices.
        pci_passthroughs = vm.environmentBrowser.QueryConfigTarget(
            host=None).pciPassthrough
        for pci_entry in pci_passthroughs:
            if pci_entry.pciDevice.id == pci_id:
                found = True
                print("Found the pci device %s in the host" % (pci_id))
                break
        if not found:
            print("Did not find the pci passthrough device %s on the host" % (
                pci_id))
            exit(1)
        print("Adding PCI device to Contrail VM: %s" % (args.vm_name))
        # hex device ID without the '0x' prefix
        deviceId = format(pci_entry.pciDevice.deviceId % 2**16, 'x')
        backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
            deviceId=deviceId,
            id=pci_entry.pciDevice.id,
            systemId=pci_entry.systemId,
            vendorId=pci_entry.pciDevice.vendorId,
            deviceName=pci_entry.pciDevice.deviceName)
        hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
        new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
        new_device_config.operation = "add"
        new_device_config.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        new_device_config.device.connectable.startConnected = True
        vm_spec = vim.vm.ConfigSpec()
        vm_spec.deviceChange = [new_device_config]
        task = vm.ReconfigVM_Task(spec=vm_spec)
        wait_for_task(task)
Example #15
    def remove_pci(self, pci, vm_status):
        """
        Remove a PCI device from a VM

        Args:
            pci (str): pci ID of the PCI device
            vm_status (GetVM): the VM status obj

        Returns:
            Task
        """
        self.logger.info("Removing PCI {0} from VM "
                         "{1}".format(pci, self.vm_obj.name))
        pci_obj = vm_status.pci_obj(pci)
        dev_config_spec = vim.VirtualDeviceConfigSpec()
        dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        dev_config_spec.device = pci_obj
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [dev_config_spec]
        return self.vm_obj.ReconfigVM_Task(spec=config_spec)
Example #16
    def add_vgpu(self, vgpu_profile):
        """ Add a vGPU profile for a VM

        Args:
            vgpu_profile (str): the name of vGPU profile to be added into a VM

        Returns:
            Task

        """

        self.logger.info("Adding vGPU {0} for "
                         "VM {1}".format(vgpu_profile, self.vm_obj.name))
        backing = vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu=vgpu_profile)
        backing_obj = vim.VirtualPCIPassthrough(backing=backing)
        dev_config_spec = vim.VirtualDeviceConfigSpec(device=backing_obj)
        dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [dev_config_spec]
        return self.vm_obj.ReconfigVM_Task(spec=config_spec)
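The vgpu_profile string has to match a profile the ESXi host actually advertises. A hedged helper for enumerating the candidates (host is an assumed vim.HostSystem; sharedPassthruGpuTypes is part of the host's config info):

def list_vgpu_profiles(host):
    # Return the vGPU profile names (e.g. "grid_p4-4q") exposed by the host.
    return list(host.config.sharedPassthruGpuTypes)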
Example #17
    def remove_vgpu(self, vgpu_profile):
        """
        Remove a vGPU profile from a VM

        Args:
            vgpu_profile (str): the name of vGPU profile to be removed from a VM

        Returns:
            Task
        """
        self.logger.info("Removing vGPU %s from VM %s" %
                         (vgpu_profile, self.vm_obj.name))
        vm_status = GetVM(self.vm_obj)
        vgpu_obj = vm_status.vgpu_obj(vgpu_profile)
        dev_config_spec = vim.VirtualDeviceConfigSpec()
        dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        dev_config_spec.device = vgpu_obj
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [dev_config_spec]
        return self.vm_obj.ReconfigVM_Task(spec=config_spec)
Example #18
    def _add_vgpu_profile_to_vm(self, vm_obj, vgpu_profile_name, vgpu_prfl):
        """
        Add a vGPU profile to a virtual machine
        Args:
            vm_obj: Managed object of virtual machine
            vgpu_profile_name: vGPU profile object name from ESXi server list
            vgpu_prfl: vGPU profile name
        Returns: Operation results
        """
        changed = False
        failed = False
        vm_current_vgpu_profile = self._get_vgpu_profile_in_the_vm(vm_obj)
        if self.params["force"] or vgpu_prfl not in vm_current_vgpu_profile:
            vgpu_p = vgpu_profile_name.vgpu
            backing = vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu=vgpu_p)
            summary = "NVIDIA GRID vGPU " + vgpu_prfl
            deviceInfo = vim.Description(summary=summary, label="PCI device 0")
            hba_object = vim.VirtualPCIPassthrough(backing=backing,
                                                   deviceInfo=deviceInfo)
            new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
            new_device_config.operation = "add"
            vmConfigSpec = vim.vm.ConfigSpec()
            vmConfigSpec.deviceChange = [new_device_config]
            vmConfigSpec.memoryReservationLockedToMax = True

            try:
                task = vm_obj.ReconfigVM_Task(spec=vmConfigSpec)
                wait_for_task(task)
                changed = True
            except Exception as exc:
                failed = True
                self.module.fail_json(
                    msg="Failed to add vGPU Profile"
                    " '%s' to vm %s." % (vgpu_prfl, vm_obj.name),
                    detail=exc.msg,
                )
        return changed, failed
Example #19
def disk_attach(vmdk_path, vm):
    '''
    Attaches an *existing* disk to a VM on a PVSCSI controller
    (we need PVSCSI to avoid SCSI rescans in the guest).
    Returns an error or the unit:bus numbers of the newly attached disk.
    '''

    kv_status_attached, kv_uuid, attach_mode = getStatusAttached(vmdk_path)
    logging.info("Attaching {0} as {1}".format(vmdk_path, attach_mode))

    # If the volume is attached then check if the attach is stale (VM is powered off).
    # Otherwise, detach the disk from the VM it's attached to.
    if kv_status_attached and kv_uuid != vm.config.uuid:
        ret_err = handle_stale_attach(vmdk_path, kv_uuid)
        if ret_err:
            return ret_err

    # NOTE: vSphere is very picky about unit numbers and controllers of virtual
    # disks. Every controller supports 15 virtual disks, and the unit
    # numbers need to be unique within the controller and range from
    # 0 to 15 with 7 being reserved (for older SCSI controllers).
    # It is up to the API client to add controllers as needed.
    # SCSI Controller keys are in the range of 1000 to 1003 (1000 + bus_number).
    offset_from_bus_number = 1000
    max_scsi_controllers = 4


    devices = vm.config.hardware.device

    # get all scsi controllers (pvsci, lsi logic, whatever)
    controllers = [d for d in devices
                   if isinstance(d, vim.VirtualSCSIController)]

    # Check if this disk is already attached; if it is, skip the disk
    # attach and the controller checks.
    device = findDeviceByPath(vmdk_path, vm)
    if device:
        # Disk is already attached.
        logging.warning("Disk %s already attached. VM=%s",
                        vmdk_path, vm.config.uuid)
        setStatusAttached(vmdk_path, vm)
        # Get the controller that the device is configured on
        pvsci = [d for d in controllers
                 if type(d) == vim.ParaVirtualSCSIController and
                 d.key == device.controllerKey]

        return dev_info(device.unitNumber,
                        get_controller_pci_slot(vm, pvsci[0],
                                                offset_from_bus_number))

    # Disk isn't attached, make sure we have a PVSCSI controller and add it if we don't
    # check if we already have a pvsci one
    pvsci = [d for d in controllers
             if type(d) == vim.ParaVirtualSCSIController]
    disk_slot = None
    if len(pvsci) > 0:
        idx, disk_slot = find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number)
        if disk_slot is not None:
            controller_key = pvsci[idx].key
            pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
                                                      offset_from_bus_number)
            logging.debug("Found an available disk slot, controller_key=%d, slot_id=%d",
                          controller_key, disk_slot)

    if disk_slot is None:
        disk_slot = 0  # starting on a fresh controller
        if len(controllers) >= max_scsi_controllers:
            msg = "Failed to place new disk - the maximum number of supported volumes has been reached."
            logging.error(msg + " VM=%s", vm.config.uuid)
            return err(msg)

        logging.info("Adding a PVSCSI controller")

        controller_key, ret_err = add_pvscsi_controller(vm, controllers, max_scsi_controllers,
                                                        offset_from_bus_number)

        if ret_err:
            return ret_err
            
        # Find the controller just added
        devices = vm.config.hardware.device
        pvsci = [d for d in devices
                 if type(d) == vim.ParaVirtualSCSIController and
                 d.key == controller_key]
        pci_slot_number = get_controller_pci_slot(vm, pvsci[0],
                                                  offset_from_bus_number)
        logging.info("Added a PVSCSI controller, controller_key=%d pci_slot_number=%s",
                      controller_key, pci_slot_number)
    
    # add disk as independent, so it won't be snapshotted with the Docker VM
    disk_spec = vim.VirtualDeviceConfigSpec(
        operation='add',
        device=vim.VirtualDisk(
            backing=vim.VirtualDiskFlatVer2BackingInfo(
                fileName="[] " + vmdk_path,
                diskMode=attach_mode),
            deviceInfo=vim.Description(
                # TODO: use docker volume name here. Issue #292
                label="dockerDataVolume",
                summary="dockerDataVolume"),
            unitNumber=disk_slot,
            controllerKey=controller_key))
    disk_changes = [disk_spec]

    spec = vim.vm.ConfigSpec()
    spec.deviceChange = disk_changes

    try:
        wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
        msg = ex.msg
        # Use metadata (KV) for extra logging
        if kv_status_attached:
            # KV claims we are attached to a different VM.
            msg += " disk {0} already attached to VM={1}".format(vmdk_path,
                                                                 kv_uuid)
            if kv_uuid == vm.config.uuid:
                msg += " (current VM)"
        return err(msg)

    setStatusAttached(vmdk_path, vm)
    logging.info("Disk %s successfully attached. controller pci_slot_number=%s, disk_slot=%d",
                 vmdk_path, pci_slot_number, disk_slot)
    return dev_info(disk_slot, pci_slot_number)
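find_available_disk_slot is not included in this example. A minimal sketch consistent with how it is called above, returning an index into pvsci plus a free unit number, or (None, None) when every controller is full (unit number 7 stays reserved):

def find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number):
    # Walk the existing PVSCSI controllers and return (index, free_unit_number).
    for idx, controller in enumerate(pvsci):
        taken = set(dev.unitNumber for dev in devices
                    if isinstance(dev, vim.VirtualDisk)
                    and dev.controllerKey == controller.key)
        avail = (set(range(0, 7)) | set(range(8, 16))) - taken
        if avail:
            return idx, min(avail)
    return None, None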
Example #20
def disk_attach(vmdk_path, vm):
    '''
    Attaches an *existing* disk to a VM on a PVSCSI controller
    (we need PVSCSI to avoid SCSI rescans in the guest).
    Returns an error or the unit:bus numbers of the newly attached disk.
    '''

    # NOTE: vSphere is very picky about unit numbers and controllers of virtual
    # disks. Every controller supports 15 virtual disks, and the unit
    # numbers need to be unique within the controller and range from
    # 0 to 15 with 7 being reserved (for older SCSI controllers).
    # It is up to the API client to add controllers as needed.
    # SCSI Controller keys are in the range of 1000 to 1003 (1000 + bus_number).
    offset_from_bus_number = 1000
    max_scsi_controllers = 4


    devices = vm.config.hardware.device

    # Make sure we have a PVSCSI controller and add it if we don't
    # TODO: add more controllers if we are out of slots. Issue #38

    # get all scsi controllers (pvsci, lsi logic, whatever)
    controllers = [d for d in devices
                   if isinstance(d, vim.VirtualSCSIController)]

    # check if we already have a pvsci one
    pvsci = [d for d in controllers
             if type(d) == vim.ParaVirtualSCSIController]
    if len(pvsci) > 0:
        disk_slot = None  # need to find out
        controller_key = pvsci[0].key
        bus_number = pvsci[0].busNumber
    else:
        logging.warning(
            "PVSCSI adapter is missing - trying to add one...")
        disk_slot = 0  # starting on a fresh controller
        if len(controllers) >= max_scsi_controllers:
            msg = "Failed to place PVSCI adapter - out of bus slots"
            logging.error(msg + " VM=%s", vm.config.uuid)
            return err(msg)

        # find empty bus slot for the controller:
        taken = set([c.busNumber for c in controllers])
        avail = set(range(0, max_scsi_controllers)) - taken

        key = avail.pop()  # bus slot
        controller_key = key + offset_from_bus_number
        disk_slot = 0
        bus_number = key
        controller_spec = vim.VirtualDeviceConfigSpec(
            operation='add',
            device=vim.ParaVirtualSCSIController(key=controller_key,
                                                 busNumber=key,
                                                 sharedBus='noSharing', ), )
        # collect the device changes into the config spec
        pvscsi_change = [controller_spec]
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = pvscsi_change

        try:
            wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
        except vim.fault.VimFault as ex:
            msg = "Failed to add PVSCSI Controller: %s" % ex.msg
            return err(msg)


    # Check if this disk is already attached, and if it is - skip the attach
    device = findDeviceByPath(vmdk_path, vm)
    if device:
        # Disk is already attached.
        logging.warning("Disk %s already attached. VM=%s",
                        vmdk_path, vm.config.uuid)
        setStatusAttached(vmdk_path, vm)
        return busInfo(device.unitNumber,
                       device.controllerKey - offset_from_bus_number)

    # Find a slot on the controller, issue attach task and wait for completion
    if disk_slot is None:
        taken = set(dev.unitNumber
                    for dev in devices
                    if type(dev) == vim.VirtualDisk and dev.controllerKey ==
                    controller_key)
        # search 15 slots: unit numbers 0-6 and 8-15, with 7 reserved for the SCSI controller
        availSlots = (set(range(0, 7)) | set(range(8, 16))) - taken

        if len(availSlots) == 0:
            msg = "Failed to place new disk - out of disk slots"
            logging.error(msg + " VM=%s", vm.config.uuid)
            return err(msg)

        disk_slot = availSlots.pop()
        logging.debug("controller_key = %d slot = %d", controller_key, disk_slot)

    # add disk as independent, so it won't be snapshotted with the Docker VM
    disk_spec = vim.VirtualDeviceConfigSpec(
        operation='add',
        device=vim.VirtualDisk(
            backing=vim.VirtualDiskFlatVer2BackingInfo(
                fileName="[] " + vmdk_path,
                diskMode='independent_persistent'),
            deviceInfo=vim.Description(
                # TODO: use docker volume name here. Issue #292
                label="dockerDataVolume",
                summary="dockerDataVolume"),
            unitNumber=disk_slot,
            controllerKey=controller_key))
    disk_changes = [disk_spec]

    spec = vim.vm.ConfigSpec()
    spec.deviceChange = disk_changes

    try:
        wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
        msg = ex.msg
        # Use metadata (KV) for extra logging
        kv_status_attached, kv_uuid = getStatusAttached(vmdk_path)
        if kv_status_attached and kv_uuid != vm.config.uuid:
            # KV claims we are attached to a different VM.
            msg += " disk {0} already attached to VM={1}".format(vmdk_path,
                                                                 kv_uuid)
        return err(msg)

    setStatusAttached(vmdk_path, vm)
    logging.info("Disk %s successfully attached. disk_slot = %d, bus_number = %d",
                 vmdk_path, disk_slot, bus_number)
    return busInfo(disk_slot, bus_number)
Example #21
    def _get_vmconfig(self, content, cpu_cores, mem, vmtemplate_moId,
                      vswitch_moId, vswitch_name, data_disk_size, disk_type):
        # Set up the NIC and disk devices

        vmtemplate = self._get_obj_bymoId(content, [vim.VirtualMachine],
                                          vmtemplate_moId)
        devices = vmtemplate.config.hardware.device
        deviceChange = []
        unitNumber = 0
        disk_temp = vim.vm.device.VirtualDisk()
        for i in devices:
            if isinstance(i, vim.VirtualEthernetCard):
                nic_change0 = vim.VirtualDeviceConfigSpec()
                nic_change0.device = i
                nic_change0.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                deviceChange.append(nic_change0)
            elif isinstance(i, vim.VirtualDisk):
                if unitNumber <= int(i.unitNumber):
                    unitNumber = int(i.unitNumber)
                    disk_temp = i
        nic_change = vim.VirtualDeviceConfigSpec()
        nic_change.device = vim.vm.device.VirtualVmxnet3()
        # nic_change.device.deviceInfo = vim.Description()
        # nic_change.device.deviceInfo.summary = vswitch_name
        netnic = self._get_obj_bymoId(content, [vim.Network], vswitch_moId)
        if isinstance(netnic, vim.dvs.DistributedVirtualPortgroup):
            nic_change.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo(
            )
            nic_change.device.backing.port = vim.dvs.PortConnection()
            nic_change.device.backing.port.switchUuid = netnic.config.distributedVirtualSwitch.uuid
            nic_change.device.backing.port.portgroupKey = netnic.key
        else:
            nic_change.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(
            )
            nic_change.device.backing.network = netnic
            nic_change.device.backing.deviceName = vswitch_name
            # nic_change.device.backing.deviceName = netnic.name
            nic_change.device.backing.useAutoDetect = False
        nic_change.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo(
        )
        nic_change.device.connectable.startConnected = True
        nic_change.device.connectable.allowGuestControl = True
        nic_change.device.connectable.connected = False
        # nic_change.device.connectable.status = 'untried'
        nic_change.device.wakeOnLanEnabled = True
        nic_change.device.addressType = 'assigned'
        nic_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        deviceChange.append(nic_change)
        if data_disk_size != "0":
            unit_number = unitNumber + 1
            if unit_number == 7:
                unit_number += 1
            disk_add = vim.VirtualDeviceConfigSpec()
            disk_add.device = vim.vm.device.VirtualDisk()
            disk_add.device.capacityInKB = (int(data_disk_size)) * 1024 * 1024
            disk_add.device.controllerKey = disk_temp.controllerKey
            disk_add.device.unitNumber = unit_number
            # disk_add.device.key = int(disk_temp.key)+1
            disk_add.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            )
            disk_add.device.backing.diskMode = 'persistent'
            if disk_type == 'thin':
                disk_add.device.backing.thinProvisioned = True
            elif disk_type == 'eager':
                disk_add.device.backing.thinProvisioned = False
                disk_add.device.backing.eagerlyScrub = True
            disk_add.fileOperation = "create"
            disk_add.operation = "add"
            deviceChange.append(disk_add)
        # Set the config (memory, CPU, devices)
        config = vim.VirtualMachineConfigSpec()
        config.memoryMB = (int(mem)) * 1024
        # config.numCoresPerSocket = (int(cpu_cores))/2
        config.numCPUs = int(cpu_cores)
        config.deviceChange = deviceChange
        return config
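A sketch of how the returned spec could be consumed; this class most likely feeds it into a clone call, so the folder, datastore, pool, and VM name below are placeholders:

clone_spec = vim.vm.CloneSpec(
    location=vim.vm.RelocateSpec(datastore=datastore, pool=resource_pool),
    config=config,  # the VirtualMachineConfigSpec built by _get_vmconfig
    powerOn=False,
    template=False)
task = vmtemplate.CloneVM_Task(folder=dest_folder, name=new_vm_name,
                               spec=clone_spec)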
Example #22
    def _reassign_ports(self, sg_aggr):
        """
        Reassigns VM to a dvportgroup based on its port's security group set
        """

        if not sg_aggr.ports_to_assign:
            return

        ports = sg_aggr.ports_to_assign
        sg_aggr.ports_to_assign = []

        port_keys_to_drop = defaultdict(list)
        for port in ports:
            sg_set = sg_util.security_group_set(port)
            if not sg_set:
                LOG.debug("Port {} has no security group set, skipping reassignment.".format(port['id']))
                continue
            port_desc = port['port_desc']
            if port_desc.port_group_key == sg_aggr.pg.key:
                # Existing ports can enter the reassignment queue
                # on agent boot before the pg_key has been set
                # on the sg_aggr object. Filter them here.
                continue
            dvs_uuid = port_desc.dvs_uuid
            dvs = self.v_center.get_dvs_by_uuid(dvs_uuid)

            # Configure the backing to the required dvportgroup
            port_connection = vim.DistributedVirtualSwitchPortConnection()
            port_connection.switchUuid = dvs_uuid
            port_connection.portgroupKey = sg_aggr.pg_key
            port_backing = vim.VirtualEthernetCardDistributedVirtualPortBackingInfo()
            port_backing.port = port_connection

            # Specify the device that we are going to edit
            virtual_device = getattr(vim, port_desc.device_type)()
            virtual_device.key = port_desc.device_key
            virtual_device.backing = port_backing
            virtual_device.addressType = "manual"
            virtual_device.macAddress = port_desc.mac_address

            # Create an edit spec for an existing virtual device
            virtual_device_config_spec = vim.VirtualDeviceConfigSpec()
            virtual_device_config_spec.operation = "edit"
            virtual_device_config_spec.device = virtual_device

            # Create a config spec for applying the update to the virtual machine
            vm_config_spec = vim.VirtualMachineConfigSpec()
            vm_config_spec.deviceChange = [virtual_device_config_spec]

            # Queue the update
            vm_ref = vim.VirtualMachine(port_desc.vmobref)
            vm_ref._stub = self.dvs.connection._stub
            if not CONF.AGENT.dry_run:
                self._green.spawn_n(reconfig_vm, vm_ref, vm_config_spec)
            else:
                LOG.debug("Reassign: %s", vm_config_spec)

            # Store old port keys of reassigned VMs
            port_keys_to_drop[dvs_uuid].append(port_desc.port_key)

        # Remove obsolete port binding specs.
        # This should be fixed in the design instead of adding corrective code!
        # Still, it is a cheap fix and saves unnecessary API calls.
        for dvs_uuid, port_keys in six.iteritems(port_keys_to_drop):
            dvs = self.v_center.get_dvs_by_uuid(dvs_uuid)
            dvs.filter_update_specs(lambda x: x.key not in port_keys)

        eventlet.sleep(0)  # yield to allow VM network reassignments to take place
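reconfig_vm is spawned on a green thread above but not defined in this snippet. A plausible minimal body (an assumption; the real helper likely adds retries and error handling):

def reconfig_vm(vm_ref, vm_config_spec):
    # Fire the reconfigure and block this greenlet until the task completes.
    from pyVim.task import WaitForTask
    WaitForTask(vm_ref.ReconfigVM_Task(spec=vm_config_spec))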