def get_image_vm_generation(self, root_vhd_path, image_meta):
        """Return the VM generation required by the given image.

        Reads the VM generation image property (falling back to the
        host's default generation) and validates it against the host's
        supported generations and the root disk format: only generation
        1 VMs can boot from a VHD; generation 2 requires VHDX.

        :param root_vhd_path: path of the root disk, or a falsy value
            when the instance boots from volume.
        :param image_meta: image metadata dict containing 'properties'.
        :returns: the constants.VM_GEN_* value mapped from the image
            property.
        :raises vmutils.HyperVException: if the requested generation is
            not supported on this host, or a VHD root disk is supplied
            for a non-generation-1 VM.
        """
        image_props = image_meta['properties']
        default_vm_gen = self._hostutils.get_default_vm_generation()
        image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                        default_vm_gen)
        if image_prop_vm not in self._hostutils.get_supported_vm_types():
            # Keep the log text identical to the exception message
            # (the previous concatenation produced a double space).
            LOG.error(
                _LE('Requested VM Generation %s is not supported on '
                    'this OS.'), image_prop_vm)
            raise vmutils.HyperVException(
                _('Requested VM Generation %s is not supported on this '
                  'OS.') % image_prop_vm)

        vm_gen = VM_GENERATIONS[image_prop_vm]

        if (vm_gen != constants.VM_GEN_1 and root_vhd_path
                and self._vhdutils.get_vhd_format(root_vhd_path)
                == constants.DISK_FORMAT_VHD):
            LOG.error(
                _LE('Requested VM Generation %s, but provided VHD '
                    'instead of VHDX.'), vm_gen)
            raise vmutils.HyperVException(
                _('Requested VM Generation %s, but provided VHD instead of '
                  'VHDX.') % vm_gen)

        return vm_gen
    def _check_and_update_bdm(self, slot_map, vm_gen, bdm):
        """Validate a block device mapping and fill in missing defaults.

        Defaults the disk bus and device type when absent, rejects
        unsupported combinations, assigns a free controller slot and
        guarantees a 'boot_index' key is present.

        :raises exception.InvalidDiskInfo: if the bus type is invalid
            for the given VM generation, or the device type is not
            'disk'.
        """
        bus = bdm.get('disk_bus')
        if not bus:
            bdm['disk_bus'] = self._DEFAULT_BUS
        elif bus not in self._VALID_BUS[vm_gen]:
            reason = _("Hyper-V does not support bus type %(disk_bus)s "
                       "for generation %(vm_gen)s instances."
                       ) % {'disk_bus': bus, 'vm_gen': vm_gen}
            raise exception.InvalidDiskInfo(reason=reason)

        dev_type = bdm.get('device_type')
        if not dev_type:
            bdm['device_type'] = 'disk'
        elif dev_type != 'disk':
            reason = _("Hyper-V does not support disk type %s for ephemerals "
                       "or volumes.") % dev_type
            raise exception.InvalidDiskInfo(reason=reason)

        drive_addr, ctrl_disk_addr = self._get_available_controller_slot(
            bdm['disk_bus'], slot_map)
        bdm['drive_addr'] = drive_addr
        bdm['ctrl_disk_addr'] = ctrl_disk_addr

        # Guarantee the 'boot_index' key exists (None when unset).
        bdm['boot_index'] = bdm.get('boot_index')
# Example #3 (score: 0)
    def _configure_remotefx(self, instance, vm_gen, config):
        """Validate RemoteFX prerequisites and enable the video adapter.

        ``config`` is a comma separated string of the form
        ``<max_resolution>,<monitor_count>[,<vram_mb>]``.

        :raises exception.InstanceUnacceptable: if RemoteFX is disabled
            via configuration, the RDS-Virtualization host feature is
            missing, or the VM generation does not support RemoteFX.
        """
        if not CONF.hyperv.enable_remotefx:
            reason = _("enable_remotefx configuration option needs to be set "
                       "to True in order to use RemoteFX")
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

        if not self._hostutils.check_server_feature(
                self._hostutils.FEATURE_RDS_VIRTUALIZATION):
            reason = _("The RDS-Virtualization feature must be installed in "
                       "order to use RemoteFX")
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

        if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
            reason = _("RemoteFX is not supported on generation %s virtual "
                       "machines on this version of Windows.") % vm_gen
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

        instance_name = instance.name
        LOG.debug('Configuring RemoteFX for instance: %s', instance_name)
        # NOTE(review): a config value with fewer than two fields raises
        # IndexError here -- confirm it is validated upstream.
        remotefx_args = config.split(',')
        remotefx_max_resolution = remotefx_args[0]
        remotefx_monitor_count = int(remotefx_args[1])
        # VRAM (in MB) is optional; None lets the adapter use its default.
        remotefx_vram = remotefx_args[2] if len(remotefx_args) == 3 else None
        vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None

        self._vmutils.enable_remotefx_video_adapter(instance_name,
                                                    remotefx_monitor_count,
                                                    remotefx_max_resolution,
                                                    vram_bytes)
    def _check_target_flavor(self, instance, flavor, block_device_info):
        """Ensure the target flavor does not shrink any disk.

        Resizing to a smaller root disk or a smaller total ephemeral
        size is not supported; raise InstanceFaultRollback wrapping a
        CannotResizeDisk error in either case.
        """
        new_root_gb = flavor.root_gb
        curr_root_gb = instance.root_gb

        ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
        curr_eph_gb = sum(eph.get('size', 0) for eph in ephemerals)
        # NOTE(review): the requested ephemeral size is read from the
        # instance rather than the target flavor -- verify this is the
        # intended source for a resize check.
        new_eph_gb = instance.ephemeral_gb

        if new_root_gb < curr_root_gb:
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(
                    reason=_("Cannot resize the root disk to a smaller size. "
                             "Current size: %(curr_root_gb)s GB. Requested "
                             "size: %(new_root_gb)s GB.") % {
                                 'curr_root_gb': curr_root_gb,
                                 'new_root_gb': new_root_gb}))

        if new_eph_gb < curr_eph_gb:
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(
                    reason=_("Cannot resize the ephemeral disk(s) to a smaller"
                             " size. Current total ephemeral size: "
                             "%(curr_eph_gb)s GB. Requested total size: "
                             "%(new_eph_gb)s GB") % {
                                 'curr_eph_gb': curr_eph_gb,
                                 'new_eph_gb': new_eph_gb}))
    def _check_target_flavor(self, instance, flavor, block_device_info):
        """Reject resizes that would shrink the root or ephemeral disks.

        :raises exception.InstanceFaultRollback: wrapping a
            CannotResizeDisk error when the target root disk size or the
            requested total ephemeral size is smaller than the current
            one.
        """
        new_root_gb = flavor.root_gb
        curr_root_gb = instance.root_gb

        ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
        curr_eph_gb = sum(eph.get('size', 0) for eph in ephemerals)
        # NOTE(review): the new ephemeral size comes from the instance,
        # not the target flavor -- confirm this is intentional.
        new_eph_gb = instance.ephemeral_gb

        if new_root_gb < curr_root_gb:
            reason = _("Cannot resize the root disk to a smaller size. "
                       "Current size: %(curr_root_gb)s GB. Requested "
                       "size: %(new_root_gb)s GB.") % {
                           'curr_root_gb': curr_root_gb,
                           'new_root_gb': new_root_gb}
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(reason=reason))

        if new_eph_gb < curr_eph_gb:
            reason = _("Cannot resize the ephemeral disk(s) to a smaller"
                       " size. Current total ephemeral size: "
                       "%(curr_eph_gb)s GB. Requested total size: "
                       "%(new_eph_gb)s GB") % {
                           'curr_eph_gb': curr_eph_gb,
                           'new_eph_gb': new_eph_gb}
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(reason=reason))
    def _configure_remotefx(self, instance, vm_gen, config):
        """Enable and configure the RemoteFX video adapter for a VM.

        ``config`` is a comma separated string:
        ``<max_resolution>,<monitor_count>[,<vram_mb>]``.

        :raises exception.InstanceUnacceptable: if RemoteFX is disabled
            in the configuration, the RDS-Virtualization feature is
            missing, or the VM generation does not support RemoteFX.
        """
        if not CONF.hyperv.enable_remotefx:
            raise exception.InstanceUnacceptable(
                instance_id=instance.uuid,
                reason=_("enable_remotefx configuration option needs to be "
                         "set to True in order to use RemoteFX"))

        if not self._hostutils.check_server_feature(
                self._hostutils.FEATURE_RDS_VIRTUALIZATION):
            raise exception.InstanceUnacceptable(
                instance_id=instance.uuid,
                reason=_("The RDS-Virtualization feature must be installed "
                         "in order to use RemoteFX"))

        if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
            raise exception.InstanceUnacceptable(
                instance_id=instance.uuid,
                reason=_("RemoteFX is not supported on generation %s virtual "
                         "machines on this version of Windows.") % vm_gen)

        vm_name = instance.name
        LOG.debug('Configuring RemoteFX for instance: %s', vm_name)

        args = config.split(',')
        max_resolution = args[0]
        monitor_count = int(args[1])
        # VRAM (in MB) is optional; None keeps the adapter default.
        vram_mb = args[2] if len(args) == 3 else None
        vram_bytes = int(vram_mb) * units.Mi if vram_mb else None

        self._vmutils.enable_remotefx_video_adapter(
            vm_name, monitor_count, max_resolution, vram_bytes)
# Example #7 (score: 0)
    def _check_and_update_bdm(self, slot_map, vm_gen, bdm):
        """Validate a block device mapping, defaulting missing fields.

        Fills in the default disk bus and device type, checks the bus is
        valid for the VM generation, reserves a controller slot and
        makes sure a 'boot_index' key is present.

        :raises exception.InvalidDiskInfo: for an unsupported bus type
            or a device type other than 'disk'.
        """
        disk_bus = bdm.get('disk_bus')
        if not disk_bus:
            bdm['disk_bus'] = self._DEFAULT_BUS
        elif disk_bus not in self._VALID_BUS[vm_gen]:
            msg = _("Hyper-V does not support bus type %(disk_bus)s "
                    "for generation %(vm_gen)s instances.") % {
                        'disk_bus': disk_bus,
                        'vm_gen': vm_gen
                    }
            raise exception.InvalidDiskInfo(reason=msg)

        device_type = bdm.get('device_type')
        if not device_type:
            bdm['device_type'] = 'disk'
        elif device_type != 'disk':
            msg = _("Hyper-V does not support disk type %s for ephemerals "
                    "or volumes.") % device_type
            raise exception.InvalidDiskInfo(reason=msg)

        (bdm['drive_addr'],
         bdm['ctrl_disk_addr']) = self._get_available_controller_slot(
             bdm['disk_bus'], slot_map)

        # make sure that boot_index is set.
        bdm['boot_index'] = bdm.get('boot_index')
    def enable_remotefx_video_adapter(self, vm_name, monitor_count,
                                      max_resolution):
        """Replace a VM's synthetic display with a RemoteFX 3D adapter.

        Verifies the host has a DirectX-11-capable GPU and SLAT-capable
        CPUs, then removes the standard synthetic display controller (if
        any), adds a synthetic 3D display controller with the requested
        monitor count and resolution, and points the S3 controller at
        the DX11 address. NOTE: this variant takes no VRAM size argument.

        :raises vmutils.HyperVException: for an unsupported resolution,
            missing host capabilities, or when RemoteFX is already
            configured on the VM.
        """
        vm = self._lookup_vm_check(vm_name)

        # Map the human-readable resolution to its WMI enum value.
        max_res_value = self._remote_fx_res_map.get(max_resolution)
        if max_res_value is None:
            raise vmutils.HyperVException(
                _("Unsupported RemoteFX resolution: "
                  "%s") % max_resolution)

        synth_3d_video_pool = self._conn.Msvm_Synth3dVideoPool()[0]
        if not synth_3d_video_pool.IsGpuCapable:
            raise vmutils.HyperVException(
                _("To enable RemoteFX on Hyper-V at "
                  "least one GPU supporting DirectX "
                  "11 is required"))
        if not synth_3d_video_pool.IsSlatCapable:
            raise vmutils.HyperVException(
                _("To enable RemoteFX on Hyper-V it "
                  "is required that the host CPUs "
                  "support SLAT"))

        # Collect the VM's resource allocation setting data entries.
        vmsettings = vm.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
        rasds = vmsettings[0].associators(
            wmi_result_class=self._CIM_RES_ALLOC_SETTING_DATA_CLASS)

        # A 3D display controller already present means RemoteFX is on.
        if [
                r for r in rasds
                if r.ResourceSubType == self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE
        ]:
            raise vmutils.HyperVException(
                _("RemoteFX is already configured "
                  "for this VM"))

        # The standard synthetic display controller must be removed
        # before the 3D controller can be added.
        synth_disp_ctrl_res_list = [
            r for r in rasds
            if r.ResourceSubType == self._SYNTH_DISP_CTRL_RES_SUB_TYPE
        ]
        if synth_disp_ctrl_res_list:
            self._remove_virt_resource(synth_disp_ctrl_res_list[0], vm.path_())

        synth_3d_disp_ctrl_res = self._get_new_resource_setting_data(
            self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE,
            self._SYNTH_3D_DISP_ALLOCATION_SETTING_DATA_CLASS)

        synth_3d_disp_ctrl_res.MaximumMonitors = monitor_count
        synth_3d_disp_ctrl_res.MaximumScreenResolution = max_res_value

        self._add_virt_resource(synth_3d_disp_ctrl_res, vm.path_())

        # Point the S3 display controller at the DirectX 11 address.
        s3_disp_ctrl_res = [
            r for r in rasds
            if r.ResourceSubType == self._S3_DISP_CTRL_RES_SUB_TYPE
        ][0]

        s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS_DX_11

        self._modify_virt_resource(s3_disp_ctrl_res, vm.path_())
 def _get_vm(self, conn_v2, vm_name):
     """Return the single Msvm_ComputerSystem named ``vm_name``.

     :raises exception.InstanceNotFound: when no VM has that name.
     :raises vmutils.HyperVException: when the name is ambiguous.
     """
     matches = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
     count = len(matches)
     if count == 0:
         raise exception.InstanceNotFound(_("VM not found: %s") % vm_name)
     if count > 1:
         raise vmutils.HyperVException(
             _("Duplicate VM name found: %s") % vm_name)
     return matches[0]
# Example #10 (score: 0)
 def _get_vm(self, conn_v2, vm_name):
     """Look up a VM by name, requiring exactly one match.

     :raises exception.NotFound: when the VM does not exist.
     :raises vmutils.HyperVException: when several VMs share the name.
     """
     found = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
     if len(found) == 0:
         raise exception.NotFound(_('VM not found: %s') % vm_name)
     if len(found) > 1:
         raise vmutils.HyperVException(
             _('Duplicate VM name found: %s') % vm_name)
     return found[0]
# Example #11 (score: 0)
 def _check_device_paths(self, device_paths):
     """Validate that exactly one physical disk path was discovered.

     :raises exception.InvalidDevicePath: when more than one path was
         found (likely an MPIO misconfiguration).
     :raises exception.DiskNotFound: when no path was found at all.
     """
     path_count = len(device_paths)
     if path_count > 1:
         raise exception.InvalidDevicePath(
             _("Multiple disk paths were found: %s. This can "
               "occur if multipath is used and MPIO is not "
               "properly configured, thus not claiming the device "
               "paths. This issue must be addressed urgently as "
               "it can lead to data corruption.") % device_paths)
     if path_count == 0:
         raise exception.DiskNotFound(
             _("Could not find the physical disk "
               "path for the requested volume."))
 def _get_conn_v2(self, host="localhost"):
     """Return a WMI connection to the virtualization v2 namespace.

     :param host: target host name; defaults to the local host.
     :raises vmutils.HyperVException: with a specific message when the
         v2 namespace is missing or the host is unreachable.
     """
     try:
         return wmi.WMI(moniker="//%s/root/virtualization/v2" % host)
     except wmi.x_wmi as ex:
         LOG.exception(_LE("Get version 2 connection error"))
         # 0x8004100E (WBEM_E_INVALID_NAMESPACE): the v2 API is absent.
         if ex.com_error.hresult == -2147217394:
             msg = _('Live migration is not supported on target host "%s"') % host
         # 0x800706BA: the RPC server on the target is unavailable.
         elif ex.com_error.hresult == -2147023174:
             msg = _('Target live migration host "%s" is unreachable') % host
         else:
             # NOTE(review): 'ex.message' only exists on Python 2
             # exceptions -- confirm, or use str(ex) on Python 3.
             msg = _("Live migration failed: %s") % ex.message
         raise vmutils.HyperVException(msg)
 def check_live_migration_config(self):
     """Verify that this host is configured for live migration.

     :raises vmutils.HyperVException: if live migration is disabled or
         no migration networks are configured.
     """
     conn_v2 = self._get_conn_v2()
     migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
     settings = migration_svc.associators(
         wmi_association_class="Msvm_ElementSettingData",
         wmi_result_class="Msvm_VirtualSystemMigrationServiceSettingData",
     )[0]
     if not settings.EnableVirtualSystemMigration:
         raise vmutils.HyperVException(
             _("Live migration is not enabled on this host"))
     if not migration_svc.MigrationServiceListenerIPAddressList:
         raise vmutils.HyperVException(
             _("Live migration networks are not configured on this host"))
# Example #14 (score: 0)
 def _check_device_paths(self, device_paths):
     """Ensure exactly one physical disk path was found.

     :raises exception.InvalidDevicePath: when multiple paths exist
         (usually a sign MPIO is not claiming the device paths).
     :raises exception.DiskNotFound: when no path was found.
     """
     if len(device_paths) > 1:
         err_msg = _("Multiple disk paths were found: %s. This can "
                     "occur if multipath is used and MPIO is not "
                     "properly configured, thus not claiming the device "
                     "paths. This issue must be addressed urgently as "
                     "it can lead to data corruption.")
         raise exception.InvalidDevicePath(err_msg % device_paths)
     elif not device_paths:
         err_msg = _("Could not find the physical disk "
                     "path for the requested volume.")
         raise exception.DiskNotFound(err_msg)
# Example #15 (score: 0)
 def check_live_migration_config(self):
     """Check that live migration is enabled and networks configured.

     :raises vmutils.HyperVException: if virtual system migration is
         disabled or no migration listener networks are set up.
     """
     conn_v2 = self._get_conn_v2()
     migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
     vsmssds = migration_svc.associators(
         wmi_association_class='Msvm_ElementSettingData',
         wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
     vsmssd = vsmssds[0]
     if not vsmssd.EnableVirtualSystemMigration:
         raise vmutils.HyperVException(
             _('Live migration is not enabled on this host'))
     if not migration_svc.MigrationServiceListenerIPAddressList:
         raise vmutils.HyperVException(
             _('Live migration networks are not configured on this host'))
    def enable_remotefx_video_adapter(self, vm_name, monitor_count,
                                      max_resolution):
        """Replace a VM's synthetic display with a RemoteFX 3D adapter.

        Checks host GPU (DirectX 11) and CPU (SLAT) capabilities, swaps
        the standard synthetic display controller for a synthetic 3D
        controller configured with the requested monitor count and
        resolution, and sets the S3 controller's DX11 address.

        :raises vmutils.HyperVException: for an unsupported resolution,
            missing host capabilities, or when RemoteFX is already
            configured on the VM.
        """
        vm = self._lookup_vm_check(vm_name)

        # Map the human-readable resolution to its WMI enum value.
        max_res_value = self._remote_fx_res_map.get(max_resolution)
        if max_res_value is None:
            raise vmutils.HyperVException(_("Unsupported RemoteFX resolution: "
                                            "%s") % max_resolution)

        synth_3d_video_pool = self._conn.Msvm_Synth3dVideoPool()[0]
        if not synth_3d_video_pool.IsGpuCapable:
            raise vmutils.HyperVException(_("To enable RemoteFX on Hyper-V at "
                                            "least one GPU supporting DirectX "
                                            "11 is required"))
        if not synth_3d_video_pool.IsSlatCapable:
            raise vmutils.HyperVException(_("To enable RemoteFX on Hyper-V it "
                                            "is required that the host CPUs "
                                            "support SLAT"))

        # Fetch the VM's resource allocation setting data via WQL,
        # filtering by the VM's configuration ID.
        rasds = self._conn.query(
                "SELECT * from %(class_name)s "
                "WHERE InstanceID LIKE 'Microsoft:%(instance_id)s%%'" % {
                    'class_name': self._CIM_RES_ALLOC_SETTING_DATA_CLASS,
                    'instance_id': vm.ConfigurationID})

        # An existing 3D display controller means RemoteFX is already on.
        if [r for r in rasds if r.ResourceSubType ==
                self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE]:
            raise vmutils.HyperVException(_("RemoteFX is already configured "
                                            "for this VM"))

        # The standard synthetic display controller must be removed
        # before the 3D controller can be added.
        synth_disp_ctrl_res_list = [r for r in rasds if r.ResourceSubType ==
                                    self._SYNTH_DISP_CTRL_RES_SUB_TYPE]
        if synth_disp_ctrl_res_list:
            self._remove_virt_resource(synth_disp_ctrl_res_list[0], vm.path_())

        synth_3d_disp_ctrl_res = self._get_new_resource_setting_data(
            self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE,
            self._SYNTH_3D_DISP_ALLOCATION_SETTING_DATA_CLASS)

        synth_3d_disp_ctrl_res.MaximumMonitors = monitor_count
        synth_3d_disp_ctrl_res.MaximumScreenResolution = max_res_value

        self._add_virt_resource(synth_3d_disp_ctrl_res, vm.path_())

        # Point the S3 display controller at the DirectX 11 address.
        s3_disp_ctrl_res = [r for r in rasds if r.ResourceSubType ==
                            self._S3_DISP_CTRL_RES_SUB_TYPE][0]

        s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS_DX_11

        self._modify_virt_resource(s3_disp_ctrl_res, vm.path_())
# Example #17 (score: 0)
    def rescue_instance(self, context, instance, network_info, image_meta,
                        rescue_password):
        """Boot the instance from a rescue image.

        Creates a root VHD from the rescue image, checks that its VM
        generation and image type match the instance's, then detaches
        the original root disk, attaches the rescue disk as the boot
        disk and re-attaches the original root disk as a SCSI disk.
        A fresh config drive containing the rescue password is attached
        when required, and the instance is powered back on.

        :raises exception.ImageUnacceptable: if the rescue image needs a
            different VM generation than the instance.
        :raises exception.InstanceNotRescuable: for instances booted
            from volume (no root disk image on disk).
        """
        rescue_image_id = image_meta.get('id') or instance.image_ref
        rescue_vhd_path = self._create_root_vhd(
            context, instance, rescue_image_id=rescue_image_id)

        rescue_vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)
        vm_gen = self._vmutils.get_vm_generation(instance.name)
        if rescue_vm_gen != vm_gen:
            err_msg = _('The requested rescue image requires a different VM '
                        'generation than the actual rescued instance. '
                        'Rescue image VM generation: %(rescue_vm_gen)s. '
                        'Rescued instance VM generation: %(vm_gen)s.') % dict(
                            rescue_vm_gen=rescue_vm_gen, vm_gen=vm_gen)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

        self.check_vm_image_type(instance.uuid, rescue_vm_gen, rescue_vhd_path)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        if not root_vhd_path:
            err_msg = _('Instance root disk image could not be found. '
                        'Rescuing instances booted from volume is '
                        'not supported.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Swap the boot disk: rescue image boots, original root disk
        # stays reachable as a secondary SCSI disk.
        self._vmutils.detach_vm_disk(instance.name,
                                     root_vhd_path,
                                     is_physical=False)
        self._attach_drive(instance.name, rescue_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._vmutils.attach_scsi_drive(instance.name,
                                        root_vhd_path,
                                        drive_type=constants.DISK)

        if configdrive.required_by(instance):
            # Rebuild the config drive so it carries the rescue password.
            self._detach_config_drive(instance.name)
            rescue_configdrive_path = self._create_config_drive(
                instance,
                injected_files=None,
                admin_password=rescue_password,
                network_info=network_info,
                rescue=True)
            self.attach_config_drive(instance, rescue_configdrive_path, vm_gen)

        self.power_on(instance)
# Example #18 (score: 0)
    def rescue_instance(self, context, instance, network_info, image_meta,
                        rescue_password):
        """Boot the instance from a rescue image.

        Creates a root VHD from the rescue image, verifies VM generation
        and image type compatibility, detaches the original root disk,
        attaches the rescue disk as the boot disk and the original root
        disk as a SCSI disk, rebuilds the config drive with the rescue
        password when required, then powers the instance on.

        :raises exception.ImageUnacceptable: if the rescue image needs a
            different VM generation than the instance.
        :raises exception.InstanceNotRescuable: for instances booted
            from volume (no root disk image on disk).
        """
        rescue_image_id = image_meta.get('id') or instance.image_ref
        rescue_vhd_path = self._create_root_vhd(
            context, instance, rescue_image_id=rescue_image_id)

        rescue_vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)
        vm_gen = self._vmutils.get_vm_generation(instance.name)
        if rescue_vm_gen != vm_gen:
            err_msg = _('The requested rescue image requires a different VM '
                        'generation than the actual rescued instance. '
                        'Rescue image VM generation: %(rescue_vm_gen)s. '
                        'Rescued instance VM generation: %(vm_gen)s.') % dict(
                            rescue_vm_gen=rescue_vm_gen,
                            vm_gen=vm_gen)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

        self.check_vm_image_type(instance.uuid, rescue_vm_gen, rescue_vhd_path)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        if not root_vhd_path:
            err_msg = _('Instance root disk image could not be found. '
                        'Rescuing instances booted from volume is '
                        'not supported.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Swap the boot disk: rescue image boots, original root disk
        # stays reachable as a secondary SCSI disk.
        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        self._attach_drive(instance.name, rescue_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
                                        drive_type=constants.DISK)

        if configdrive.required_by(instance):
            # Rebuild the config drive so it carries the rescue password.
            self._detach_config_drive(instance.name)
            rescue_configdrive_path = self._create_config_drive(
                instance,
                injected_files=None,
                admin_password=rescue_password,
                network_info=network_info,
                rescue=True)
            self.attach_config_drive(instance, rescue_configdrive_path,
                                     vm_gen)

        self.power_on(instance)
# Example #19 (score: 0)
 def _get_conn_v2(self, host='localhost'):
     """Return a WMI connection to the virtualization v2 namespace.

     :param host: target host name; defaults to the local host.
     :raises vmutils.HyperVException: with a specific message when the
         v2 namespace is missing or the host is unreachable.
     """
     try:
         return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
     except wmi.x_wmi as ex:
         LOG.exception(_LE('Get version 2 connection error'))
         # 0x8004100E (WBEM_E_INVALID_NAMESPACE): the v2 API is absent.
         if ex.com_error.hresult == -2147217394:
             msg = (
                 _('Live migration is not supported on target host "%s"') %
                 host)
         # 0x800706BA: the RPC server on the target is unavailable.
         elif ex.com_error.hresult == -2147023174:
             msg = (_('Target live migration host "%s" is unreachable') %
                    host)
         else:
             # NOTE(review): 'ex.message' only exists on Python 2
             # exceptions -- confirm, or use str(ex) on Python 3.
             msg = _('Live migration failed: %s') % ex.message
         raise vmutils.HyperVException(msg)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a migration on the source host.

        Restores the instance's files, re-validates the block device
        info against the image's VM generation, recreates the VM with
        its root disk, ephemerals and config drive, restores the boot
        order and optionally powers the instance back on.

        :raises exception.DiskNotFound: if the boot VHD cannot be found
            for a disk-backed instance.
        """
        LOG.debug("finish_revert_migration called", instance=instance)

        instance_name = instance.name
        self._revert_migration_files(instance_name)

        image_meta = self._imagecache.get_image_details(context, instance)
        vm_gen = self._vmops.get_image_vm_generation(instance.uuid, image_meta)

        self._block_dev_manager.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']

        # Disk-backed instances need their restored boot VHD located;
        # volume-backed ones keep the path from the BDI.
        if root_device['type'] == constants.DISK:
            root_device['path'] = self._pathutils.lookup_root_vhd_path(
                instance_name)
            if not root_device['path']:
                raise exception.DiskNotFound(
                    _("Cannot find boot VHD file for instance: %s")
                    % instance_name)

        ephemerals = block_device_info['ephemerals']
        self._check_ephemeral_disks(instance, ephemerals)

        self._vmops.create_instance(instance, network_info,
                                    root_device, block_device_info, vm_gen,
                                    image_meta)

        self._check_and_attach_config_drive(instance, vm_gen)
        self._vmops.set_boot_order(vm_gen, block_device_info, instance_name)
        if power_on:
            self._vmops.power_on(instance, network_info=network_info)
    def get_disk_resource_path(self, connection_info):
        """Return the mounted disk path backing a Fibre Channel volume.

        Rescans the disks up to ``_MAX_RESCAN_COUNT`` times, collecting
        device names from the FC volume mappings, until a single device
        path shows up.

        :raises exception.InvalidDevicePath: if multiple device paths
            are reported for the volume.
        :raises exception.DiskNotFound: if no device path is found after
            all rescan attempts.
        """
        for _attempt in range(self._MAX_RESCAN_COUNT):
            self._diskutils.rescan_disks()
            mappings = self._get_fc_volume_mappings(connection_info)

            LOG.debug("Retrieved volume mappings %(vol_mappings)s "
                      "for volume %(conn_info)s",
                      dict(vol_mappings=mappings,
                           conn_info=connection_info))

            # Because of MPIO, a mapping accessed through another HBA at
            # that moment may report an empty device name; filter those
            # out.
            found_paths = {mapping['device_name']
                           for mapping in mappings
                           if mapping['device_name']}

            if found_paths:
                self._check_device_paths(found_paths)
                return self._get_mounted_disk_path_by_dev_name(
                    list(found_paths)[0])

        err_msg = _("Could not find the physical disk "
                    "path for the requested volume.")
        raise exception.DiskNotFound(err_msg)
 def execute(self, *args, **kwargs):
     """Run a command, verifying the iscsicli output reports success.

     :returns: the command's stdout.
     :raises vmutils.HyperVException: when the success marker is
         missing from stdout.
     """
     stdout_value, _stderr_value = utils.execute(*args, **kwargs)
     if 'The operation completed successfully' not in stdout_value:
         raise vmutils.HyperVException(
             _('An error has occurred when '
               'calling the iscsi initiator: %s') % stdout_value)
     return stdout_value
# Example #23 (score: 0)
    def get_disk_resource_path(self, connection_info):
        """Return the mounted disk path backing a Fibre Channel volume.

        Rescans the disks up to ``_MAX_RESCAN_COUNT`` times, collecting
        device names from the FC volume mappings, until a single device
        path shows up.

        :raises exception.InvalidDevicePath: if multiple device paths
            are reported for the volume.
        :raises exception.DiskNotFound: if no device path is found after
            all rescan attempts.
        """
        for attempt in range(self._MAX_RESCAN_COUNT):
            disk_paths = set()

            self._diskutils.rescan_disks()
            volume_mappings = self._get_fc_volume_mappings(connection_info)

            LOG.debug(
                "Retrieved volume mappings %(vol_mappings)s "
                "for volume %(conn_info)s",
                dict(vol_mappings=volume_mappings, conn_info=connection_info))

            # Because of MPIO, we may not be able to get the device name
            # from a specific mapping if the disk was accessed through
            # an other HBA at that moment. In that case, the device name
            # will show up as an empty string.
            for mapping in volume_mappings:
                device_name = mapping['device_name']
                if device_name:
                    disk_paths.add(device_name)

            if disk_paths:
                self._check_device_paths(disk_paths)
                disk_path = list(disk_paths)[0]
                return self._get_mounted_disk_path_by_dev_name(disk_path)

        err_msg = _("Could not find the physical disk "
                    "path for the requested volume.")
        raise exception.DiskNotFound(err_msg)
    def host_maintenance_mode(self, host, mode):
        """Starts/Stops host maintenance. On start, it triggers
        guest VMs evacuation.

        :param host: hostname of the compute host.
        :param mode: truthy to enter maintenance, falsy to leave it.
        :returns: 'on_maintenance' or 'off_maintenance'.
        :raises exception.MigrationError: if some VMs could not be
            migrated off the host.
        """
        ctxt = context.get_admin_context()

        if not mode:
            # Leaving maintenance: just re-enable the compute service.
            self._set_service_state(host=host,
                                    binary='nova-compute',
                                    is_disabled=False)
            LOG.info(_LI('Host is no longer under maintenance.'))
            return 'off_maintenance'

        self._set_service_state(host=host,
                                binary='nova-compute',
                                is_disabled=True)
        # Wait for any in-progress tasks before migrating the instances.
        vms_uuids = self._vmops.list_instance_uuids()
        for vm_uuid in vms_uuids:
            self._wait_for_instance_pending_task(ctxt, vm_uuid)

        vm_names = self._vmutils.list_instances()
        for vm_name in vm_names:
            self._migrate_vm(ctxt, vm_name, host)

        vms_uuid_after_migration = self._vmops.list_instance_uuids()
        remaining_vms = len(vms_uuid_after_migration)
        if remaining_vms == 0:
            # Fixed: the concatenated message was missing a space
            # ("successfully.Host ...").
            LOG.info(
                _LI('All vms have been migrated successfully. '
                    'Host is down for maintenance'))
            return 'on_maintenance'
        raise exception.MigrationError(reason=_(
            'Not all vms have been migrated: %s remaining instances.') %
                                       remaining_vms)
# Example #25 (score: 0)
    def unrescue_instance(self, instance):
        """Restore a rescued instance to its original root disk.

        Powers the VM off, detaches and deletes the rescue disk,
        re-attaches the original root disk as the boot disk, swaps the
        rescue config drive for the original one (if any), and powers
        the VM back on.

        :raises vmutils.HyperVException: if the instance is in RESCUED
            state but the root and/or rescue images are missing.
        """
        self.power_off(instance)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name, rescue=True)

        if instance.vm_state == vm_states.RESCUED and not (rescue_vhd_path and root_vhd_path):
            err_msg = _("Missing instance root and/or rescue image. " "The instance cannot be unrescued.")
            raise vmutils.HyperVException(err_msg)

        vm_gen = self._vmutils.get_vm_gen(instance.name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # NOTE(review): root_vhd_path may be None here when the instance
        # is not in RESCUED state -- confirm detach_vm_disk tolerates it.
        self._vmutils.detach_vm_disk(instance.name, root_vhd_path, is_physical=False)
        if rescue_vhd_path:
            self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path, is_physical=False)
            fileutils.delete_if_exists(rescue_vhd_path)
        self._attach_drive(instance.name, root_vhd_path, 0, self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._detach_config_drive(instance.name, rescue=True, delete=True)

        # Reattach the configdrive, if exists.
        configdrive_path = self._pathutils.lookup_configdrive_path(instance.name)
        if configdrive_path:
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance)
# Example #26 (score: 0)
    def unrescue_instance(self, instance):
        """Restore a rescued instance to its original root disk.

        Powers the VM off, detaches and deletes the rescue disk,
        re-attaches the original root disk as the boot disk, swaps the
        rescue config drive for the original one (re-attaching it only
        if not already attached), and powers the VM back on.

        :raises exception.InstanceNotRescuable: if the instance is in
            RESCUED state but the root and/or rescue images are missing.
        """
        self.power_off(instance)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                               rescue=True)

        if (instance.vm_state == vm_states.RESCUED and
                not (rescue_vhd_path and root_vhd_path)):
            err_msg = _('Missing instance root and/or rescue image.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        vm_gen = self._vmutils.get_vm_generation(instance.name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # NOTE(review): root_vhd_path may be None here when the instance
        # is not in RESCUED state -- confirm detach_vm_disk tolerates it.
        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        if rescue_vhd_path:
            self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                         is_physical=False)
            fileutils.delete_if_exists(rescue_vhd_path)
        self._attach_drive(instance.name, root_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._detach_config_drive(instance.name, rescue=True, delete=True)

        # Reattach the configdrive, if exists and not already attached.
        configdrive_path = self._pathutils.lookup_configdrive_path(
            instance.name)
        if configdrive_path and not self._vmutils.is_disk_attached(
                configdrive_path, is_physical=False):
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance)
Exemple #27
0
    def login_storage_target(self, connection_info):
        """Log in to an iSCSI target unless a session already exists.

        Only CHAP authentication (or none) is supported. After logging
        in, waits for the target LUN to be mounted locally.

        :param connection_info: volume connection dict; the 'data' entry
            is expected to contain target_lun, target_iqn, target_portal
            and optional auth_* keys.
        :raises vmutils.HyperVException: for unsupported authentication
            methods.
        """
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        auth_method = data.get('auth_method')
        auth_username = data.get('auth_username')
        auth_password = data.get('auth_password')

        if auth_method and auth_method.upper() != 'CHAP':
            raise vmutils.HyperVException(
                _("Cannot log in target %(target_iqn)s. Unsupported iSCSI "
                  "authentication method: %(auth_method)s.") %
                 {'target_iqn': target_iqn,
                  'auth_method': auth_method})

        # Check if we already logged in
        if self._volutils.get_device_number_for_target(target_iqn, target_lun):
            LOG.debug("Already logged in on storage target. No need to "
                      "login. Portal: %(target_portal)s, "
                      "IQN: %(target_iqn)s, LUN: %(target_lun)s",
                      {'target_portal': target_portal,
                       'target_iqn': target_iqn, 'target_lun': target_lun})
        else:
            LOG.debug("Logging in on storage target. Portal: "
                      "%(target_portal)s, IQN: %(target_iqn)s, "
                      "LUN: %(target_lun)s",
                      {'target_portal': target_portal,
                       'target_iqn': target_iqn, 'target_lun': target_lun})
            self._volutils.login_storage_target(target_lun, target_iqn,
                                                target_portal, auth_username,
                                                auth_password)
            # Wait for the target to be mounted
            self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
Exemple #28
0
    def parse_disk_qos_specs(self, qos_specs):
        """Convert raw disk QoS specs into a (min IOPS, total IOPS) pair.

        Explicit *_iops_sec values take precedence; otherwise the
        *_bytes_sec values are converted to IOPS. Unsupported spec keys
        are logged and ignored.

        :param qos_specs: mapping of QoS spec names to values.
        :returns: tuple of (min_iops, total_iops) as ints.
        :raises exception.Invalid: if the requested minimum IOPS exceeds
            the (non-zero) maximum IOPS.
        """
        total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
        min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))

        # Fall back to byte/sec derived values when IOPS are not given.
        total_iops = int(qos_specs.get(
            'total_iops_sec', self._bytes_per_sec_to_iops(total_bytes_sec)))
        min_iops = int(qos_specs.get(
            'min_iops_sec', self._bytes_per_sec_to_iops(min_bytes_sec)))

        # A total of 0 means "unlimited", so only validate when it is set.
        if total_iops and total_iops < min_iops:
            raise exception.Invalid(
                _("Invalid QoS specs: minimum IOPS cannot be greater "
                  "than maximum IOPS. "
                  "Requested minimum IOPS: %(min_iops)s "
                  "Requested maximum IOPS: %(total_iops)s.")
                % {'min_iops': min_iops, 'total_iops': total_iops})

        unsupported_specs = [key for key in qos_specs
                             if key not in self._SUPPORTED_QOS_SPECS]
        if unsupported_specs:
            LOG.warning(_LW('Ignoring unsupported qos specs: '
                            '%(unsupported_specs)s. '
                            'Supported qos specs: %(supported_qos_speces)s'),
                        {'unsupported_specs': unsupported_specs,
                         'supported_qos_speces': self._SUPPORTED_QOS_SPECS})

        return min_iops, total_iops
Exemple #29
0
        def get_disk_path():
            """Resolve the mounted disk path for an FC volume.

            Closure over self and connection_info from the enclosing
            function (not visible here). When no FC mappings are found
            yet, triggers a disk rescan so a later retry can succeed.

            :raises exception.DiskNotFound: when no usable device path
                could be determined on this attempt.
            """
            disk_paths = set()
            volume_mappings = self._get_fc_volume_mappings(connection_info)
            if not volume_mappings:
                LOG.debug(
                    "Could not find FC mappings for volume "
                    "%(conn_info)s. Rescanning disks.",
                    dict(conn_info=connection_info))
                self._diskutils.rescan_disks()
            else:
                # Because of MPIO, we may not be able to get the device name
                # from a specific mapping if the disk was accessed through
                # an other HBA at that moment. In that case, the device name
                # will show up as an empty string.
                for mapping in volume_mappings:
                    device_name = mapping['device_name']
                    if device_name:
                        disk_paths.add(device_name)

                if disk_paths:
                    # All collected paths must refer to the same disk.
                    self._check_device_paths(disk_paths)
                    disk_path = list(disk_paths)[0]
                    return self._get_mounted_disk_path_by_dev_name(disk_path)

            err_msg = _("Could not find the physical disk "
                        "path for the requested volume.")
            raise exception.DiskNotFound(err_msg)
    def unrescue_instance(self, instance):
        """Restore a rescued instance to boot from its original root disk.

        Powers the instance off, detaches and deletes the rescue VHD and
        rescue config drive, reattaches the root disk (and config drive,
        if present), then powers the instance back on.

        :param instance: nova instance object being unrescued.
        :raises vmutils.HyperVException: if the instance is in the RESCUED
            state but the root and/or rescue image cannot be found.
        """
        self.power_off(instance)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                               rescue=True)

        if (instance.vm_state == vm_states.RESCUED
                and not (rescue_vhd_path and root_vhd_path)):
            err_msg = _('Missing instance root and/or rescue image. '
                        'The instance cannot be unrescued.')
            raise vmutils.HyperVException(err_msg)

        vm_gen = self._vmutils.get_vm_gen(instance.name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Detach everything before reattaching the root disk at the boot
        # controller address; the order matters here.
        self._vmutils.detach_vm_disk(instance.name,
                                     root_vhd_path,
                                     is_physical=False)
        if rescue_vhd_path:
            self._vmutils.detach_vm_disk(instance.name,
                                         rescue_vhd_path,
                                         is_physical=False)
            fileutils.delete_if_exists(rescue_vhd_path)
        self._attach_drive(instance.name, root_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._detach_config_drive(instance.name, rescue=True, delete=True)

        # Reattach the configdrive, if exists.
        configdrive_path = self._pathutils.lookup_configdrive_path(
            instance.name)
        if configdrive_path:
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance)
Exemple #31
0
    def parse_disk_qos_specs(self, qos_specs):
        """Convert raw disk QoS specs into a (min IOPS, total IOPS) pair.

        Explicit *_iops_sec values take precedence; otherwise the
        *_bytes_sec values are converted to IOPS. Unsupported spec keys
        are logged and ignored.

        :param qos_specs: mapping of QoS spec names to values.
        :returns: tuple of (min_iops, total_iops) as ints.
        :raises exception.Invalid: if the requested minimum IOPS exceeds
            the (non-zero) maximum IOPS.
        """
        total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
        min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))

        total_iops = int(qos_specs.get('total_iops_sec',
                                       self._bytes_per_sec_to_iops(
                                           total_bytes_sec)))
        min_iops = int(qos_specs.get('min_iops_sec',
                                     self._bytes_per_sec_to_iops(
                                         min_bytes_sec)))

        # total_iops == 0 means "unlimited", so only validate when set.
        if total_iops and total_iops < min_iops:
            err_msg = (_("Invalid QoS specs: minimum IOPS cannot be greater "
                         "than maximum IOPS. "
                         "Requested minimum IOPS: %(min_iops)s "
                         "Requested maximum IOPS: %(total_iops)s.") %
                       {'min_iops': min_iops,
                        'total_iops': total_iops})
            raise exception.Invalid(err_msg)

        unsupported_specs = [spec for spec in qos_specs if
                             spec not in self._SUPPORTED_QOS_SPECS]
        if unsupported_specs:
            # NOTE(review): 'supported_qos_speces' is misspelled but the
            # dict key must keep matching the format string above.
            LOG.warning(_LW('Ignoring unsupported qos specs: '
                            '%(unsupported_specs)s. '
                            'Supported qos specs: %(supported_qos_speces)s'),
                        {'unsupported_specs': unsupported_specs,
                         'supported_qos_speces': self._SUPPORTED_QOS_SPECS})

        return min_iops, total_iops
Exemple #32
0
    def host_maintenance_mode(self, host, mode):
        """Starts/Stops host maintenance. On start, it triggers
        guest VMs evacuation.

        :param host: hostname whose nova-compute service is toggled.
        :param mode: truthy to enter maintenance, falsy to leave it.
        :returns: 'on_maintenance' or 'off_maintenance'.
        :raises exception.MigrationError: if some VMs could not be
            migrated off the host.
        """
        ctxt = context.get_admin_context()

        if not mode:
            self._set_service_state(host=host, binary='nova-compute',
                                    is_disabled=False)
            LOG.info(_LI('Host is no longer under maintenance.'))
            return 'off_maintenance'

        # Disable the service first so no new instances get scheduled
        # here while we evacuate the existing ones.
        self._set_service_state(host=host, binary='nova-compute',
                                is_disabled=True)
        vms_uuids = self._vmops.list_instance_uuids()
        for vm_uuid in vms_uuids:
            # Wait for in-flight tasks to settle before migrating.
            self._wait_for_instance_pending_task(ctxt, vm_uuid)

        vm_names = self._vmutils.list_instances()
        for vm_name in vm_names:
            self._migrate_vm(ctxt, vm_name, host)

        vms_uuid_after_migration = self._vmops.list_instance_uuids()
        remaining_vms = len(vms_uuid_after_migration)
        if remaining_vms == 0:
            # NOTE(review): missing space between the two sentences in
            # this log message; left as-is since it is runtime text.
            LOG.info(_LI('All vms have been migrated successfully.'
                         'Host is down for maintenance'))
            return 'on_maintenance'
        raise exception.MigrationError(
            reason=_('Not all vms have been migrated: %s remaining instances.')
            % remaining_vms)
Exemple #33
0
        def get_disk_path():
            """Resolve the mounted disk path for an FC volume.

            Closure over self and connection_info from the enclosing
            function (not visible here). When no FC mappings are found
            yet, triggers a disk rescan so a later retry can succeed.

            :raises exception.DiskNotFound: when no usable device path
                could be determined on this attempt.
            """
            disk_paths = set()
            volume_mappings = self._get_fc_volume_mappings(connection_info)
            if not volume_mappings:
                LOG.debug("Could not find FC mappings for volume "
                          "%(conn_info)s. Rescanning disks.",
                          dict(conn_info=connection_info))
                self._diskutils.rescan_disks()
            else:
                # Because of MPIO, we may not be able to get the device name
                # from a specific mapping if the disk was accessed through
                # an other HBA at that moment. In that case, the device name
                # will show up as an empty string.
                for mapping in volume_mappings:
                    device_name = mapping['device_name']
                    if device_name:
                        disk_paths.add(device_name)

                if disk_paths:
                    # All collected paths must refer to the same disk.
                    self._check_device_paths(disk_paths)
                    disk_path = list(disk_paths)[0]
                    return self._get_mounted_disk_path_by_dev_name(
                        disk_path)

            err_msg = _("Could not find the physical disk "
                        "path for the requested volume.")
            raise exception.DiskNotFound(err_msg)
    def unrescue_instance(self, instance):
        """Restore a rescued instance to boot from its original root disk.

        Powers the instance off, detaches and deletes the rescue VHD and
        rescue config drive, reattaches the root disk (and the config
        drive when it is not already attached), then powers the instance
        back on.

        :param instance: nova instance object being unrescued.
        :raises exception.InstanceNotRescuable: if the instance is in the
            RESCUED state but the root and/or rescue image is missing.
        """
        self.power_off(instance)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                               rescue=True)

        if (instance.vm_state == vm_states.RESCUED and
                not (rescue_vhd_path and root_vhd_path)):
            err_msg = _('Missing instance root and/or rescue image.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        vm_gen = self._vmutils.get_vm_generation(instance.name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Detach everything before reattaching the root disk at the boot
        # controller address; the order matters here.
        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        if rescue_vhd_path:
            self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                         is_physical=False)
            fileutils.delete_if_exists(rescue_vhd_path)
        self._attach_drive(instance.name, root_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._detach_config_drive(instance.name, rescue=True, delete=True)

        # Reattach the configdrive, if exists and not already attached.
        configdrive_path = self._pathutils.lookup_configdrive_path(
            instance.name)
        if configdrive_path and not self._vmutils.is_disk_attached(
                configdrive_path, is_physical=False):
            self.attach_config_drive(instance, configdrive_path, vm_gen)
        self.power_on(instance)
 def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
     curr_size = vhd_info['MaxInternalSize']
     if new_size < curr_size:
         raise vmutils.VHDResizeException(_("Cannot resize a VHD "
                                            "to a smaller size"))
     elif new_size > curr_size:
         self._resize_vhd(vhd_path, new_size)
 def execute(self, *args, **kwargs):
     """Run an external command and return its stdout.

     The iscsicli tool reports success in its output rather than via the
     exit code, so the output is checked for the success marker.

     :returns: the command's stdout.
     :raises vmutils.HyperVException: if the success marker is absent.
     """
     stdout_value, stderr_value = utils.execute(*args, **kwargs)
     if 'The operation completed successfully' not in stdout_value:
         raise vmutils.HyperVException(_('An error has occurred when '
                                         'calling the iscsi initiator: %s')
                                       % stdout_value)
     return stdout_value
Exemple #37
0
    def create_instance(self, instance, network_info, block_device_info,
                        root_vhd_path, eph_vhd_path, vm_gen, image_meta):
        """Create and configure a Hyper-V VM for the given instance.

        Creates the VM, optionally configures RemoteFX, attaches root and
        ephemeral disks plus volumes, sets up serial ports, QoS and NICs,
        and enables metrics collection when configured.

        :param instance: nova instance object to build.
        :param network_info: list of vifs to plug.
        :param block_device_info: volumes to attach.
        :param root_vhd_path: path of the root disk, or None for
            boot-from-volume.
        :param eph_vhd_path: path of the ephemeral disk, or None.
        :param vm_gen: Hyper-V VM generation (constants.VM_GEN_*).
        :param image_meta: image metadata (used for serial port settings).
        :raises vmutils.HyperVException: if RemoteFX is requested on a
            generation 2 VM.
        """
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)

        self._vmutils.create_vm(instance_name,
                                instance.memory_mb,
                                instance.vcpus,
                                CONF.hyperv.limit_cpu_features,
                                CONF.hyperv.dynamic_memory_ratio,
                                vm_gen,
                                instance_path,
                                [instance.uuid])

        flavor_extra_specs = instance.flavor.extra_specs
        remote_fx_config = flavor_extra_specs.get(
                constants.FLAVOR_REMOTE_FX_EXTRA_SPEC_KEY)
        if remote_fx_config:
            if vm_gen == constants.VM_GEN_2:
                raise vmutils.HyperVException(_("RemoteFX is not supported "
                                                "on generation 2 virtual "
                                                "machines."))
            else:
                self._configure_remotefx(instance, remote_fx_config)

        self._vmutils.create_scsi_controller(instance_name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Root disk (if any) goes first on the controller, ephemeral next.
        ctrl_disk_addr = 0
        if root_vhd_path:
            self._attach_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr,
                               controller_type)
            ctrl_disk_addr += 1

        if eph_vhd_path:
            self._attach_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr,
                               controller_type)

        # If ebs_root is False, the first volume will be attached to SCSI
        # controller. Generation 2 VMs only has a SCSI controller.
        ebs_root = vm_gen is not constants.VM_GEN_2 and root_vhd_path is None
        self._volumeops.attach_volumes(block_device_info,
                                       instance_name,
                                       ebs_root)

        serial_ports = self._get_image_serial_port_settings(image_meta)
        self._create_vm_com_port_pipes(instance, serial_ports)
        self._set_instance_disk_qos_specs(instance)

        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name,
                                     vif['id'],
                                     vif['address'])
            vif_driver = self._get_vif_driver(vif.get('type'))
            vif_driver.plug(instance, vif)

        if CONF.hyperv.enable_instance_metrics_collection:
            self._vmutils.enable_vm_metrics_collection(instance_name)
    def create_instance(self, instance, network_info, block_device_info,
                        root_vhd_path, eph_vhd_path, vm_gen, image_meta):
        """Create and configure a Hyper-V VM for the given instance.

        Creates the VM, optionally configures RemoteFX, attaches root and
        ephemeral disks plus volumes, sets up serial ports, QoS and NICs,
        and enables metrics collection when configured.

        :param instance: nova instance object to build.
        :param network_info: list of vifs to plug.
        :param block_device_info: volumes to attach.
        :param root_vhd_path: path of the root disk, or None for
            boot-from-volume.
        :param eph_vhd_path: path of the ephemeral disk, or None.
        :param vm_gen: Hyper-V VM generation (constants.VM_GEN_*).
        :param image_meta: image metadata (used for serial port settings).
        :raises vmutils.HyperVException: if RemoteFX is requested on a
            generation 2 VM.
        """
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)

        self._vmutils.create_vm(instance_name,
                                instance.memory_mb,
                                instance.vcpus,
                                CONF.hyperv.limit_cpu_features,
                                CONF.hyperv.dynamic_memory_ratio,
                                vm_gen,
                                instance_path,
                                [instance.uuid])

        flavor_extra_specs = instance.flavor.extra_specs
        remote_fx_config = flavor_extra_specs.get(
                constants.FLAVOR_REMOTE_FX_EXTRA_SPEC_KEY)
        if remote_fx_config:
            if vm_gen == constants.VM_GEN_2:
                raise vmutils.HyperVException(_("RemoteFX is not supported "
                                                "on generation 2 virtual "
                                                "machines."))
            else:
                self._configure_remotefx(instance, remote_fx_config)

        self._vmutils.create_scsi_controller(instance_name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        # Root disk (if any) goes first on the controller, ephemeral next.
        ctrl_disk_addr = 0
        if root_vhd_path:
            self._attach_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr,
                               controller_type)
            ctrl_disk_addr += 1

        if eph_vhd_path:
            self._attach_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr,
                               controller_type)

        # If ebs_root is False, the first volume will be attached to SCSI
        # controller. Generation 2 VMs only has a SCSI controller.
        ebs_root = vm_gen is not constants.VM_GEN_2 and root_vhd_path is None
        self._volumeops.attach_volumes(block_device_info,
                                       instance_name,
                                       ebs_root)

        serial_ports = self._get_image_serial_port_settings(image_meta)
        self._create_vm_com_port_pipes(instance, serial_ports)
        self._set_instance_disk_qos_specs(instance)

        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name,
                                     vif['id'],
                                     vif['address'])
            vif_driver = self._get_vif_driver(vif.get('type'))
            vif_driver.plug(instance, vif)

        if CONF.hyperv.enable_instance_metrics_collection:
            self._vmutils.enable_vm_metrics_collection(instance_name)
    def get_free_controller_slot(self, scsi_controller_path):
        """Return the lowest unused slot index on the SCSI controller.

        :param scsi_controller_path: WMI path of the SCSI controller.
        :returns: first free slot index.
        :raises HyperVException: if every slot is occupied.
        """
        occupied = {int(self._get_disk_resource_address(disk))
                    for disk in self.get_attached_disks(scsi_controller_path)}

        for candidate in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
            if candidate not in occupied:
                return candidate
        raise HyperVException(_("Exceeded the maximum number of slots"))
Exemple #40
0
 def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
     """Grow the VHD to new_size when needed; shrinking is rejected.

     :param vhd_path: path of the VHD file to resize.
     :param vhd_info: VHD metadata dict containing 'MaxInternalSize'.
     :param new_size: requested size in bytes.
     :raises vmutils.VHDResizeException: if new_size would shrink the VHD.
     """
     curr_size = vhd_info['MaxInternalSize']
     if new_size < curr_size:
         raise vmutils.VHDResizeException(
             _("Cannot resize a VHD "
               "to a smaller size"))
     elif new_size > curr_size:
         self._resize_vhd(vhd_path, new_size)
    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding VHD via the image management
        service.

        NOTE(review): 'format' shadows the builtin name; kept for
        interface compatibility.

        :param path: destination path of the new VHD.
        :param max_internal_size: maximum internal size in bytes.
        :param format: must be constants.DISK_FORMAT_VHD.
        :raises vmutils.HyperVException: for any other disk format.
        """
        if format != constants.DISK_FORMAT_VHD:
            raise vmutils.HyperVException(_("Unsupported disk format: %s") %
                                          format)

        (job_path, ret_val) = self._image_man_svc.CreateDynamicVirtualHardDisk(
            Path=path, MaxInternalSize=max_internal_size)
        self._vmutils.check_ret_val(ret_val, job_path)
 def check_admin_permissions(self):
     """Verify the current account can manage Hyper-V via WMI.

     An empty Msvm_VirtualSystemManagementService query result means
     the account running nova-compute lacks the required rights.

     :raises HyperVAuthorizationException: when the check fails.
     """
     if self._conn.Msvm_VirtualSystemManagementService():
         return
     msg = _(
         "The Windows account running nova-compute on this Hyper-V"
         " host doesn't have the required permissions to create or"
         " operate the virtual machine."
     )
     raise HyperVAuthorizationException(msg)
    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding virtual hard disk.

        NOTE(review): 'format' shadows the builtin name; kept for
        interface compatibility.

        :param path: destination path of the new disk.
        :param max_internal_size: maximum internal size in bytes.
        :param format: disk format key understood by _vhd_format_map.
        :raises vmutils.HyperVException: for unsupported disk formats.
        """
        vhd_format = self._vhd_format_map.get(format)
        if not vhd_format:
            raise vmutils.HyperVException(_("Unsupported disk format: %s") %
                                          format)

        self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
                         max_internal_size=max_internal_size)
Exemple #44
0
    def _wait_for_job(self, job_path):
        """Poll WMI job state and wait for completion.

        Busy-polls the job every 0.1s. On failure, raises with as much
        error detail as the job object exposes; concrete jobs carry
        error descriptions directly, other job classes are queried via
        GetError().

        :param job_path: WMI path of the job to wait on.
        :returns: the completed WMI job object.
        :raises HyperVException: if the job ends in any state other than
            completed.
        """
        job = self._get_wmi_obj(job_path)

        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            # Re-fetch the job object to observe its current state.
            job = self._get_wmi_obj(job_path)
        if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
            job_state = job.JobState
            if job.path().Class == "Msvm_ConcreteJob":
                err_sum_desc = job.ErrorSummaryDescription
                err_desc = job.ErrorDescription
                err_code = job.ErrorCode
                raise HyperVException(
                    _("WMI job failed with status "
                      "%(job_state)d. Error details: "
                      "%(err_sum_desc)s - %(err_desc)s - "
                      "Error code: %(err_code)d") % {
                          'job_state': job_state,
                          'err_sum_desc': err_sum_desc,
                          'err_desc': err_desc,
                          'err_code': err_code
                      })
            else:
                (error, ret_val) = job.GetError()
                if not ret_val and error:
                    raise HyperVException(
                        _("WMI job failed with status "
                          "%(job_state)d. Error details: "
                          "%(error)s") % {
                              'job_state': job_state,
                              'error': error
                          })
                else:
                    raise HyperVException(
                        _("WMI job failed with status "
                          "%d. No error "
                          "description available") % job_state)
        desc = job.Description
        elap = job.ElapsedTime
        LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s", {
            'desc': desc,
            'elap': elap
        })
        return job
Exemple #45
0
 def _lookup_vm(self, vm_name):
     vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
     n = len(vms)
     if n == 0:
         return None
     elif n > 1:
         raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
     else:
         return vms[0]
Exemple #46
0
    def _configure_remotefx(self, instance, config):
        """Enable the RemoteFX video adapter for an instance.

        :param instance: nova instance object to configure.
        :param config: flavor extra-spec value of the form
            "<max_resolution>,<monitor_count>".
        :raises vmutils.HyperVException: if RemoteFX is disabled in the
            nova config or the RDS-Virtualization feature is missing.
        """
        if not CONF.hyperv.enable_remotefx:
            raise vmutils.HyperVException(
                _("enable_remotefx configuration option needs to be set to " "True in order to use RemoteFX")
            )

        if not self._hostutils.check_server_feature(self._hostutils.FEATURE_RDS_VIRTUALIZATION):
            raise vmutils.HyperVException(
                _("The RDS-Virtualization feature must be installed " "in order to use RemoteFX")
            )

        instance_name = instance.name
        LOG.debug("Configuring RemoteFX for instance: %s", instance_name)

        (remotefx_max_resolution, remotefx_monitor_count) = config.split(",")
        remotefx_monitor_count = int(remotefx_monitor_count)

        self._vmutils.enable_remotefx_video_adapter(instance_name, remotefx_monitor_count, remotefx_max_resolution)
Exemple #47
0
 def _lookup_vm(self, vm_name):
     """Return the Msvm_ComputerSystem object named vm_name, or None.

     :param vm_name: the VM's ElementName to look up.
     :returns: the matching WMI object, or None when no VM matches.
     :raises HyperVException: when more than one VM shares the name.
     """
     vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
     n = len(vms)
     if n == 0:
         return None
     elif n > 1:
         raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
     else:
         return vms[0]
Exemple #48
0
 def host_power_action(self, action):
     """Reboots, shuts down or powers up the host.

     Shutdown and reboot are delegated to hostutils; startup is not
     supported on Hyper-V. Any other action value is silently ignored,
     matching the original behavior.

     :param action: one of the constants.HOST_POWER_ACTION_* values.
     :raises NotImplementedError: for HOST_POWER_ACTION_STARTUP.
     """
     supported_actions = (constants.HOST_POWER_ACTION_SHUTDOWN,
                          constants.HOST_POWER_ACTION_REBOOT)
     if action in supported_actions:
         self._hostutils.host_power_action(action)
     elif action == constants.HOST_POWER_ACTION_STARTUP:
         raise NotImplementedError(
             _("Host PowerOn is not supported by the Hyper-V driver"))
Exemple #49
0
 def host_power_action(self, action):
     """Reboots, shuts down or powers up the host.

     :param action: one of the constants.HOST_POWER_ACTION_* values.
         Unknown actions are silently ignored.
     :raises NotImplementedError: for HOST_POWER_ACTION_STARTUP, which
         Hyper-V cannot perform.
     """
     if action in [constants.HOST_POWER_ACTION_SHUTDOWN,
                   constants.HOST_POWER_ACTION_REBOOT]:
         self._hostutils.host_power_action(action)
     else:
         if action == constants.HOST_POWER_ACTION_STARTUP:
             raise NotImplementedError(
                 _("Host PowerOn is not supported by the Hyper-V driver"))
Exemple #50
0
    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding virtual hard disk.

        NOTE(review): 'format' shadows the builtin name; kept for
        interface compatibility.

        :param path: destination path of the new disk.
        :param max_internal_size: maximum internal size in bytes.
        :param format: disk format key understood by _vhd_format_map.
        :raises vmutils.HyperVException: for unsupported disk formats.
        """
        vhd_format = self._vhd_format_map.get(format)
        if not vhd_format:
            raise vmutils.HyperVException(
                _("Unsupported disk format: %s") % format)

        self._create_vhd(self._VHD_TYPE_DYNAMIC,
                         vhd_format,
                         path,
                         max_internal_size=max_internal_size)
Exemple #51
0
    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding VHD via the WMI image
        management service.

        NOTE(review): 'format' shadows the builtin name; kept for
        interface compatibility.

        :param path: destination path of the new VHD.
        :param max_internal_size: maximum internal size in bytes.
        :param format: must be constants.DISK_FORMAT_VHD.
        :raises vmutils.HyperVException: for any other disk format.
        """
        if format != constants.DISK_FORMAT_VHD:
            raise vmutils.HyperVException(
                _("Unsupported disk format: %s") % format)

        image_man_svc = self._conn.Msvm_ImageManagementService()[0]

        (job_path, ret_val) = image_man_svc.CreateDynamicVirtualHardDisk(
            Path=path, MaxInternalSize=max_internal_size)
        self._vmutils.check_ret_val(ret_val, job_path)
Exemple #52
0
 def _requires_certificate(self, instance_id, image_meta):
     os_type = image_meta.get('properties', {}).get('os_type', None)
     if not os_type:
         reason = _('For secure boot, os_type must be specified in image '
                    'properties.')
         raise exception.InstanceUnacceptable(instance_id=instance_id,
                                              reason=reason)
     elif os_type == 'windows':
         return False
     return True
 def _requires_certificate(self, instance_id, image_meta):
     """Return True when secure boot requires a certificate.

     :param instance_id: instance identifier used in error reporting.
     :param image_meta: image metadata dict; its 'properties' entry may
         declare 'os_type'.
     :returns: False for Windows images, True otherwise.
     :raises exception.InstanceUnacceptable: when os_type is missing.
     """
     os_type = image_meta.get('properties', {}).get('os_type', None)
     if not os_type:
         reason = _('For secure boot, os_type must be specified in image '
                    'properties.')
         raise exception.InstanceUnacceptable(instance_id=instance_id,
                                              reason=reason)
     elif os_type == 'windows':
         # Windows images carry their own trusted certificates.
         return False
     return True
    def _create_config_drive(self,
                             instance,
                             injected_files,
                             admin_password,
                             network_info,
                             rescue=False):
        """Build a config drive for the instance and return its path.

        Always builds an ISO first; when config_drive_cdrom is disabled,
        the ISO is converted to a VHD with qemu-img and removed.

        :param instance: nova instance object.
        :param injected_files: files to embed into the config drive.
        :param admin_password: injected only when
            config_drive_inject_password is enabled.
        :param network_info: network metadata to include.
        :param rescue: whether this drive belongs to a rescue boot.
        :returns: path of the generated ISO or VHD config drive.
        :raises vmutils.UnsupportedConfigDriveFormatException: when
            config_drive_format is not iso9660.
        """
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.UnsupportedConfigDriveFormatException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_LI('Using config drive for instance'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)

        configdrive_path_iso = self._pathutils.get_configdrive_path(
            instance.name, constants.DVD_FORMAT, rescue=rescue)
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso},
                 instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'),
                              e,
                              instance=instance)

        if not CONF.hyperv.config_drive_cdrom:
            # Convert the ISO to a VHD so it can be attached as a disk,
            # then drop the intermediate ISO.
            configdrive_path = self._pathutils.get_configdrive_path(
                instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
            utils.execute(CONF.hyperv.qemu_img_cmd,
                          'convert',
                          '-f',
                          'raw',
                          '-O',
                          'vpc',
                          configdrive_path_iso,
                          configdrive_path,
                          attempts=1)
            self._pathutils.remove(configdrive_path_iso)
        else:
            configdrive_path = configdrive_path_iso

        return configdrive_path
    def host_power_action(self, action):
        """Shut down or reboot the host via Win32_OperatingSystem.

        :param action: one of the constants.HOST_POWER_ACTION_* values.
        :raises NotImplementedError: for any action other than shutdown
            or reboot (e.g. startup, which Hyper-V cannot perform).
        """
        win32_os = self._conn_cimv2.Win32_OperatingSystem()[0]

        if action == constants.HOST_POWER_ACTION_SHUTDOWN:
            win32_os.Win32Shutdown(self._HOST_FORCED_SHUTDOWN)
        elif action == constants.HOST_POWER_ACTION_REBOOT:
            win32_os.Win32Shutdown(self._HOST_FORCED_REBOOT)
        else:
            raise NotImplementedError(
                _("Host %(action)s is not supported by the Hyper-V driver") %
                {"action": action})
Exemple #56
0
 def _check_and_attach_config_drive(self, instance, vm_gen):
     """Attach the instance's config drive when one is required.

     :param instance: nova instance object.
     :param vm_gen: Hyper-V VM generation of the instance.
     :raises vmutils.HyperVException: if a config drive is required but
         none exists on disk.
     """
     if configdrive.required_by(instance):
         configdrive_path = self._pathutils.lookup_configdrive_path(
             instance.name)
         if configdrive_path:
             self._vmops.attach_config_drive(instance, configdrive_path,
                                             vm_gen)
         else:
             raise vmutils.HyperVException(
                 _("Config drive is required by instance: %s, "
                   "but it does not exist.") % instance.name)
 def _get_vhd_dynamic_blk_size(self, vhd_path):
     """Read the dynamic VHD block size field from the file header.

     Reads 4 bytes at VHD_BLK_SIZE_OFFSET and decodes them as a
     big-endian signed 32-bit integer.

     :param vhd_path: path of the VHD file to inspect.
     :returns: the block size as an int.
     :raises vmutils.HyperVException: when the file cannot be read.
     """
     try:
         with open(vhd_path, "rb") as vhd_file:
             vhd_file.seek(VHD_BLK_SIZE_OFFSET)
             raw_blk_size = vhd_file.read(4)
     except IOError:
         raise vmutils.HyperVException(_("Unable to obtain block size from"
                                         " VHD %(vhd_path)s") %
                                         {"vhd_path": vhd_path})
     return struct.unpack('>i', raw_blk_size)[0]
 def _check_and_attach_config_drive(self, instance, vm_gen):
     """Attach the instance's config drive when one is required.

     :param instance: nova instance object.
     :param vm_gen: Hyper-V VM generation of the instance.
     :raises vmutils.HyperVException: if a config drive is required but
         none exists on disk.
     """
     if configdrive.required_by(instance):
         configdrive_path = self._pathutils.lookup_configdrive_path(
             instance.name)
         if configdrive_path:
             self._vmops.attach_config_drive(instance, configdrive_path,
                                             vm_gen)
         else:
             raise vmutils.HyperVException(
                 _("Config drive is required by instance: %s, "
                   "but it does not exist.") % instance.name)
Exemple #59
0
 def _get_vhd_dynamic_blk_size(self, vhd_path):
     """Read the dynamic VHD block size field from the file header.

     Reads 4 bytes at VHD_BLK_SIZE_OFFSET and decodes them as a
     big-endian signed 32-bit integer.

     :param vhd_path: path of the VHD file to inspect.
     :returns: the block size as an int.
     :raises vmutils.HyperVException: when the file cannot be read.
     """
     blk_size_offset = VHD_BLK_SIZE_OFFSET
     try:
         with open(vhd_path, "rb") as f:
             f.seek(blk_size_offset)
             # NOTE(review): despite the name, this holds the 4-byte
             # block size field, not a version number.
             version = f.read(4)
     except IOError:
         raise vmutils.HyperVException(_("Unable to obtain block size from"
                                         " VHD %(vhd_path)s") %
                                         {"vhd_path": vhd_path})
     return struct.unpack('>i', version)[0]
Exemple #60
0
def _get_virt_utils_class(v1_class, v2_class):
    """Select the v1 or v2 virtualization utils class based on config.

    :param v1_class: class using the legacy "root/virtualization"
        namespace.
    :param v2_class: class using the v2 WMI namespace.
    :returns: the class chosen per CONF.hyperv.force_hyperv_utils_v1.
    :raises vmutils.HyperVException: when v1 is forced on an OS that no
        longer supports the legacy namespace.
    """
    # The "root/virtualization" WMI namespace is no longer supported on
    # Windows Server / Hyper-V Server 2012 R2 / Windows 8.1
    # (kernel version 6.3) or above.
    if (CONF.hyperv.force_hyperv_utils_v1
            and get_hostutils().check_min_windows_version(6, 3)):
        raise vmutils.HyperVException(
            _('The "force_hyperv_utils_v1" option cannot be set to "True" '
              'on Windows Server / Hyper-V Server 2012 R2 or above as the WMI '
              '"root/virtualization" namespace is no longer supported.'))
    return _get_class(v1_class, v2_class, CONF.hyperv.force_hyperv_utils_v1)