def _configure_remotefx(self, instance, vm_gen, config):
    if not CONF.hyperv.enable_remotefx:
        reason = _("enable_remotefx configuration option needs to be set "
                   "to True in order to use RemoteFX")
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    if not self._hostutils.check_server_feature(
            self._hostutils.FEATURE_RDS_VIRTUALIZATION):
        reason = _("The RDS-Virtualization feature must be installed in "
                   "order to use RemoteFX")
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
        reason = _("RemoteFX is not supported on generation %s virtual "
                   "machines on this version of Windows.") % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    instance_name = instance.name
    LOG.debug('Configuring RemoteFX for instance: %s', instance_name)

    remotefx_args = config.split(',')
    remotefx_max_resolution = remotefx_args[0]
    remotefx_monitor_count = int(remotefx_args[1])
    remotefx_vram = remotefx_args[2] if len(remotefx_args) == 3 else None
    vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None

    self._vmutils.enable_remotefx_video_adapter(instance_name,
                                                remotefx_monitor_count,
                                                remotefx_max_resolution,
                                                vram_bytes)

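# A minimal, self-contained sketch of the config string parsing performed
# above, assuming the "<resolution>,<monitor_count>[,<vram_mb>]" format
# implied by _configure_remotefx (e.g. "1920x1200,2,1024"). The helper
# name parse_remotefx_config is hypothetical, for illustration only.
def parse_remotefx_config(config):
    Mi = 1024 * 1024  # stand-in for oslo_utils units.Mi
    args = config.split(',')
    max_resolution = args[0]
    monitor_count = int(args[1])
    vram = args[2] if len(args) == 3 else None
    vram_bytes = int(vram) * Mi if vram else None
    return max_resolution, monitor_count, vram_bytes

# parse_remotefx_config("1920x1200,2,1024")
#     -> ('1920x1200', 2, 1073741824)
# parse_remotefx_config("1024x768,1") -> ('1024x768', 1, None)
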
def _get_instance_vnuma_config(self, instance, image_meta):
    """Returns the appropriate NUMA configuration for Hyper-V instances,
    given the desired instance NUMA topology.

    :param instance: instance containing the flavor and its extra_specs,
                     where the NUMA topology is defined.
    :param image_meta: image's metadata, containing properties related to
                       the instance's NUMA topology.
    :returns: memory amount and number of vCPUs per NUMA node or
              (None, None), if instance NUMA topology was not requested.
    :raises exception.InstanceUnacceptable: If the given instance NUMA
            topology is not possible on Hyper-V, or if CPU pinning is
            required.
    """
    instance_topology = hardware.numa_get_constraints(instance.flavor,
                                                      image_meta)
    if not instance_topology:
        # instance NUMA topology was not requested.
        return None, None

    memory_per_numa_node = instance_topology.cells[0].memory
    cpus_per_numa_node = len(instance_topology.cells[0].cpuset)

    # TODO(stephenfin): We can avoid this check entirely if we rely on
    # the 'supports_pcpus' driver capability (via a trait), but we need
    # to drop support for the legacy 'vcpu_pin_set' path in the libvirt
    # driver first
    if instance_topology.cpu_policy not in (
        None, fields.CPUAllocationPolicy.SHARED,
    ):
        raise exception.InstanceUnacceptable(
            reason=_("Hyper-V does not support CPU pinning."),
            instance_id=instance.uuid)

    # validate that the requested NUMA topology is not asymmetric.
    # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y.
    # same with memory.
    for cell in instance_topology.cells:
        if len(cell.cpuset) != cpus_per_numa_node:
            reason = _("Hyper-V does not support NUMA topologies with "
                       "uneven number of processors. (%(a)s != %(b)s)") % {
                'a': len(cell.cpuset), 'b': cpus_per_numa_node}
            raise exception.InstanceUnacceptable(reason=reason,
                                                 instance_id=instance.uuid)
        if cell.memory != memory_per_numa_node:
            reason = _("Hyper-V does not support NUMA topologies with "
                       "uneven amounts of memory. (%(a)s != %(b)s)") % {
                'a': cell.memory, 'b': memory_per_numa_node}
            raise exception.InstanceUnacceptable(reason=reason,
                                                 instance_id=instance.uuid)

    return memory_per_numa_node, cpus_per_numa_node

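# A standalone sketch of the symmetry validation above, assuming a NUMA
# topology modelled as a list of (cpuset, memory_mb) cells; the
# is_symmetric helper name is hypothetical, for illustration only.
def is_symmetric(cells):
    first_cpus, first_mem = len(cells[0][0]), cells[0][1]
    return all(len(cpuset) == first_cpus and memory == first_mem
               for cpuset, memory in cells)

# is_symmetric([({0, 1}, 2048), ({2, 3}, 2048)])    -> True
# is_symmetric([({0, 1}, 2048), ({2, 3, 4}, 2048)]) -> False (uneven CPUs)
# is_symmetric([({0, 1}, 2048), ({2, 3}, 4096)])    -> False (uneven RAM)
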
def _requires_secure_boot(self, instance, image_meta, vm_gen):
    """Checks whether the given instance requires Secure Boot.

    Secure Boot feature will be enabled by setting the "os_secure_boot"
    image property or the "os:secure_boot" flavor extra spec to required.

    :raises exception.InstanceUnacceptable: if the given image_meta has
        no os_type property set, or if the image property value and the
        flavor extra spec value are conflicting, or if Secure Boot is
        required, but the instance's VM generation is 1.
    """
    img_secure_boot = image_meta.properties.get('os_secure_boot')
    flavor_secure_boot = instance.flavor.extra_specs.get(
        constants.FLAVOR_SPEC_SECURE_BOOT)

    requires_sb = False
    conflicting_values = False

    if flavor_secure_boot == fields.SecureBoot.REQUIRED:
        requires_sb = True
        if img_secure_boot == fields.SecureBoot.DISABLED:
            conflicting_values = True
    elif img_secure_boot == fields.SecureBoot.REQUIRED:
        requires_sb = True
        if flavor_secure_boot == fields.SecureBoot.DISABLED:
            conflicting_values = True

    if conflicting_values:
        reason = _(
            "Conflicting image metadata property and flavor extra_specs "
            "values: os_secure_boot (%(image_secure_boot)s) / "
            "os:secure_boot (%(flavor_secure_boot)s)") % {
                'image_secure_boot': img_secure_boot,
                'flavor_secure_boot': flavor_secure_boot}
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    if requires_sb:
        if vm_gen != constants.VM_GEN_2:
            reason = _('Secure boot requires generation 2 VM.')
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

        os_type = image_meta.properties.get('os_type')
        if not os_type:
            reason = _('For secure boot, os_type must be specified in '
                       'image properties.')
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

    return requires_sb

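# A quick sketch of how the image and flavor Secure Boot values combine
# above, reduced to plain strings ('required'/'disabled'/None); the
# resolve_secure_boot helper is hypothetical. It returns a
# (requires_sb, conflict) pair.
def resolve_secure_boot(flavor_sb, image_sb):
    if flavor_sb == 'required':
        return True, image_sb == 'disabled'
    if image_sb == 'required':
        return True, flavor_sb == 'disabled'
    return False, False

# resolve_secure_boot('required', None)       -> (True, False)
# resolve_secure_boot('required', 'disabled') -> (True, True)   # rejected
# resolve_secure_boot(None, 'required')       -> (True, False)
# resolve_secure_boot(None, None)             -> (False, False)
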
def _get_instance_vnuma_config(self, instance, image_meta):
    """Returns the appropriate NUMA configuration for Hyper-V instances,
    given the desired instance NUMA topology.

    :param instance: instance containing the flavor and its extra_specs,
                     where the NUMA topology is defined.
    :param image_meta: image's metadata, containing properties related to
                       the instance's NUMA topology.
    :returns: memory amount and number of vCPUs per NUMA node or
              (None, None), if instance NUMA topology was not requested.
    :raises exception.InstanceUnacceptable: If the given instance NUMA
            topology is not possible on Hyper-V, or if CPU pinning is
            required.
    """
    instance_topology = hardware.numa_get_constraints(
        instance.flavor, image_meta)
    if not instance_topology:
        # instance NUMA topology was not requested.
        return None, None

    memory_per_numa_node = instance_topology.cells[0].memory
    cpus_per_numa_node = len(instance_topology.cells[0].cpuset)

    if instance_topology.cpu_pinning_requested:
        raise exception.InstanceUnacceptable(
            reason=_("Hyper-V does not support CPU pinning."),
            instance_id=instance.uuid)

    # validate that the requested NUMA topology is not asymmetric.
    # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y.
    # same with memory.
    for cell in instance_topology.cells:
        if len(cell.cpuset) != cpus_per_numa_node:
            reason = _("Hyper-V does not support NUMA topologies with "
                       "uneven number of processors. (%(a)s != %(b)s)") % {
                'a': len(cell.cpuset), 'b': cpus_per_numa_node}
            raise exception.InstanceUnacceptable(reason=reason,
                                                 instance_id=instance.uuid)
        if cell.memory != memory_per_numa_node:
            reason = _("Hyper-V does not support NUMA topologies with "
                       "uneven amounts of memory. (%(a)s != %(b)s)") % {
                'a': cell.memory, 'b': memory_per_numa_node}
            raise exception.InstanceUnacceptable(reason=reason,
                                                 instance_id=instance.uuid)

    return memory_per_numa_node, cpus_per_numa_node

def replace_allocation_with_migration(context, instance, migration):
    """Replace instance's allocation with one for a migration.

    :returns: (source_compute_node, migration_allocation)
    """
    try:
        source_cn = objects.ComputeNode.get_by_host_and_nodename(
            context, instance.host, instance.node)
    except exception.ComputeHostNotFound:
        LOG.error('Unable to find record for source '
                  'node %(node)s on %(host)s',
                  {'host': instance.host, 'node': instance.node},
                  instance=instance)
        # A generic error like this will just error out the migration
        # and do any rollback required
        raise

    schedclient = scheduler_client.SchedulerClient()
    reportclient = schedclient.reportclient

    orig_alloc = reportclient.get_allocations_for_consumer_by_provider(
        source_cn.uuid, instance.uuid)
    if not orig_alloc:
        LOG.error('Unable to find existing allocations for instance',
                  instance=instance)
        # A generic error like this will just error out the migration
        # and do any rollback required
        raise exception.InstanceUnacceptable(
            instance_id=instance.uuid,
            reason=_('Instance has no source node allocation'))

    # FIXME(danms): Since we don't have an atomic operation to adjust
    # allocations for multiple consumers, we have to have space on the
    # source for double the claim before we delete the old one
    # FIXME(danms): This method is flawed in that it assumes allocations
    # against only one provider. So, this may overwrite allocations
    # against a shared provider, if we had one.
    success = reportclient.put_allocations(source_cn.uuid, migration.uuid,
                                           orig_alloc, instance.project_id,
                                           instance.user_id)
    if not success:
        LOG.error('Unable to replace resource claim on source '
                  'host %(host)s node %(node)s for instance',
                  {'host': instance.host, 'node': instance.node},
                  instance=instance)
        # Mimic the "no space" error that could have come from the
        # scheduler. Once we have an atomic replace operation, this
        # would be a severe error.
        raise exception.NoValidHost(
            reason=_('Unable to replace instance claim on source'))
    else:
        LOG.debug('Created allocations for migration %(mig)s on %(rp)s',
                  {'mig': migration.uuid, 'rp': source_cn.uuid})

    reportclient.delete_allocation_for_instance(instance.uuid)

    return source_cn, orig_alloc

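# A toy, in-memory sketch of the copy-then-delete move performed above:
# the allocation keyed by the instance's uuid is copied under the
# migration's uuid before the instance's entry is removed. All names
# here are illustrative; the real work goes through the placement
# report client.
def move_allocation(allocations, instance_uuid, migration_uuid):
    source = allocations.get(instance_uuid)
    if not source:
        raise LookupError('Instance has no source node allocation')
    # Non-atomic: both consumers briefly hold the claim, so the source
    # node needs headroom for double the resources.
    allocations[migration_uuid] = dict(source)
    del allocations[instance_uuid]
    return allocations[migration_uuid]

# allocs = {'inst-uuid': {'VCPU': 2, 'MEMORY_MB': 2048}}
# move_allocation(allocs, 'inst-uuid', 'mig-uuid')
# allocs -> {'mig-uuid': {'VCPU': 2, 'MEMORY_MB': 2048}}
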
def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(root_vhd_path) ==
            constants.DISK_FORMAT_VHD):
        reason = _('Requested VM Generation %s, but provided VHD '
                   'instead of VHDX.') % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(root_vhd_path) ==
            constants.DISK_FORMAT_VHD):
        reason = _('Requested VM Generation %s, but provided VHD '
                   'instead of VHDX.') % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

def _requires_certificate(self, instance_id, image_meta):
    os_type = image_meta.get('properties', {}).get('os_type', None)
    if not os_type:
        reason = _('For secure boot, os_type must be specified in image '
                   'properties.')
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)
    elif os_type == 'windows':
        return False
    return True

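# A minimal standalone sketch of the rule above, using a plain image_meta
# dict; requires_certificate is a hypothetical free-function analogue of
# the method, for illustration only.
def requires_certificate(image_meta):
    os_type = image_meta.get('properties', {}).get('os_type')
    if not os_type:
        raise ValueError('For secure boot, os_type must be specified in '
                         'image properties.')
    # Windows guests are covered by the default Secure Boot template;
    # any other declared os_type needs an extra certificate.
    return os_type != 'windows'

# requires_certificate({'properties': {'os_type': 'windows'}}) -> False
# requires_certificate({'properties': {'os_type': 'linux'}})   -> True
# requires_certificate({'properties': {}})  -> raises (os_type missing)
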
def _get_instance_vnuma_config(self, instance, image_meta):
    """Returns the appropriate NUMA configuration for Hyper-V instances,
    given the desired instance NUMA topology.

    :param instance: instance containing the flavor and its extra_specs,
                     where the NUMA topology is defined.
    :param image_meta: image's metadata, containing properties related to
                       the instance's NUMA topology.
    :returns: memory amount and number of vCPUs per NUMA node or
              (None, None), if instance NUMA topology was not requested.
    :raises exception.InstanceUnacceptable: If the given instance NUMA
            topology is not possible on Hyper-V.
    """
    image_meta = objects.ImageMeta.from_dict(image_meta)
    instance_topology = hardware.numa_get_constraints(
        instance.flavor, image_meta)
    if not instance_topology:
        # instance NUMA topology was not requested.
        return None, None

    memory_per_numa_node = instance_topology.cells[0].memory
    cpus_per_numa_node = len(instance_topology.cells[0].cpuset)

    cpus_pinned = instance_topology.cells[0].cpu_pinning is not None
    if cpus_pinned:
        raise exception.InstanceUnacceptable(
            reason=_("Hyper-V cannot guarantee CPU pinning."),
            instance_id=instance.uuid)

    # validate that the requested NUMA topology is not asymmetric.
    # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y.
    # same with memory.
    for cell in instance_topology.cells:
        if (len(cell.cpuset) != cpus_per_numa_node or
                cell.memory != memory_per_numa_node):
            raise exception.InstanceUnacceptable(
                reason=_("Hyper-V cannot guarantee the given instance "
                         "NUMA topology."),
                instance_id=instance.uuid)

    return memory_per_numa_node, cpus_per_numa_node

def get_image_vm_generation(self, instance_id, image_meta):
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_meta.properties.get('hw_machine_type',
                                              default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        reason = _('Requested VM Generation %s is not supported on '
                   'this OS.') % image_prop_vm
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

    return VM_GENERATIONS[image_prop_vm]

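# The VM_GENERATIONS mapping used above translates the image's
# hw_machine_type value into a Hyper-V generation constant. A sketch of
# its assumed shape (the 'hyperv-gen*' strings and integer values mirror
# nova.virt.hyperv.constants, shown here for illustration only):
VM_GENERATIONS = {
    'hyperv-gen1': 1,  # constants.IMAGE_PROP_VM_GEN_1 -> VM_GEN_1
    'hyperv-gen2': 2,  # constants.IMAGE_PROP_VM_GEN_2 -> VM_GEN_2
}

# get_image_vm_generation(uuid, image_meta) with
# image_meta.properties['hw_machine_type'] == 'hyperv-gen2' -> 2
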
def get_image_vm_generation(self, instance_id, root_vhd_path, image_meta):
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_meta.properties.get(
        'hw_machine_type', default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        reason = _('Requested VM Generation %s is not supported on '
                   'this OS.') % image_prop_vm
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

    vm_gen = VM_GENERATIONS[image_prop_vm]

    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(
                root_vhd_path) == constants.DISK_FORMAT_VHD):
        reason = _('Requested VM Generation %s, but provided VHD '
                   'instead of VHDX.') % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

    return vm_gen

def get_image_vm_generation(self, instance_id, image_meta):
    image_props = image_meta['properties']
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                    default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        reason = _('Requested VM Generation %s is not supported on '
                   'this OS.') % image_prop_vm
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

    return VM_GENERATIONS[image_prop_vm]

def _configure_remotefx(self, instance, vm_gen):
    extra_specs = instance.flavor.extra_specs
    remotefx_max_resolution = extra_specs.get(
        constants.FLAVOR_ESPEC_REMOTEFX_RES)
    if not remotefx_max_resolution:
        # RemoteFX not required.
        return

    if not CONF.hyperv.enable_remotefx:
        reason = _("enable_remotefx configuration option needs to be set "
                   "to True in order to use RemoteFX.")
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    if not self._hostutils.check_server_feature(
            self._hostutils.FEATURE_RDS_VIRTUALIZATION):
        reason = _("The RDS-Virtualization feature must be installed in "
                   "order to use RemoteFX.")
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
        reason = _("RemoteFX is not supported on generation %s virtual "
                   "machines on this version of Windows.") % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    instance_name = instance.name
    LOG.debug('Configuring RemoteFX for instance: %s', instance_name)

    remotefx_monitor_count = int(extra_specs.get(
        constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1)
    remotefx_vram = extra_specs.get(constants.FLAVOR_ESPEC_REMOTEFX_VRAM)
    vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None

    self._vmutils.enable_remotefx_video_adapter(
        instance_name, remotefx_monitor_count, remotefx_max_resolution,
        vram_bytes)

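# Illustrative flavor extra specs that would enable the RemoteFX path
# above. The key names are an assumption based on the Hyper-V driver's
# documented 'os:*' extra specs; check the FLAVOR_ESPEC_REMOTEFX_*
# constants for the authoritative values.
remotefx_extra_specs = {
    'os:resolution': '1920x1200',  # FLAVOR_ESPEC_REMOTEFX_RES (assumed)
    'os:monitors': '2',            # FLAVOR_ESPEC_REMOTEFX_MONITORS (assumed)
    'os:vram': '1024',             # FLAVOR_ESPEC_REMOTEFX_VRAM (assumed)
}
# With these specs, _configure_remotefx would request a dual-monitor
# 1920x1200 RemoteFX adapter with 1024 * units.Mi bytes of VRAM.
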
def _requires_secure_boot(self, instance, image_meta, vm_gen):
    flavor = instance.flavor
    flavor_secure_boot = flavor.extra_specs.get(
        constants.FLAVOR_SPEC_SECURE_BOOT, None)
    image_props = image_meta['properties']
    image_prop_secure_boot = image_props.get(
        constants.IMAGE_PROP_SECURE_BOOT, None)

    if flavor_secure_boot in (constants.REQUIRED, constants.DISABLED):
        requires_secure_boot = constants.REQUIRED == flavor_secure_boot
    else:
        requires_secure_boot = (
            image_prop_secure_boot == constants.REQUIRED)

    if vm_gen != constants.VM_GEN_2 and requires_secure_boot:
        reason = _('Secure boot requires generation 2 VM.')
        raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                             reason=reason)

    return requires_secure_boot

def _destroy_kernel_ramdisk(self, instance, vm_ref):
    """Three situations can occur:

    1. We have neither a ramdisk nor a kernel, in which case we are a
       RAW image and can omit this step

    2. We have one or the other, in which case we flag an error

    3. We have both, in which case we safely remove both the kernel
       and the ramdisk.
    """
    instance_id = instance.id
    if not instance.kernel_id and not instance.ramdisk_id:
        # 1. No kernel or ramdisk
        LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
                    "skipping kernel and ramdisk deletion") % locals())
        return

    if not (instance.kernel_id and instance.ramdisk_id):
        # 2. We only have kernel xor ramdisk
        raise exception.InstanceUnacceptable(
            instance_id=instance_id,
            reason=_("instance has a kernel or ramdisk but not both"))

    # 3. We have both kernel and ramdisk
    (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
                                                       vm_ref)

    LOG.debug(_("Removing kernel/ramdisk files"))

    args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
    task = self._session.async_call_plugin(
        'glance', 'remove_kernel_ramdisk', args)
    self._session.wait_for_task(task, instance.id)

    LOG.debug(_("kernel/ramdisk files removed"))
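
# The three cases above reduce to a simple presence check; a standalone
# sketch (classify_kernel_ramdisk is a hypothetical helper, for
# illustration only):
def classify_kernel_ramdisk(kernel_id, ramdisk_id):
    if not kernel_id and not ramdisk_id:
        return 'skip'          # 1. RAW/VHD image, nothing to remove
    if not (kernel_id and ramdisk_id):
        return 'error'         # 2. kernel xor ramdisk -> unacceptable
    return 'remove_both'       # 3. remove both kernel and ramdisk

# classify_kernel_ramdisk(None, None) -> 'skip'
# classify_kernel_ramdisk('k', None)  -> 'error'
# classify_kernel_ramdisk('k', 'r')   -> 'remove_both'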