Example #1
    def get_numa_nodes(self):
        numa_nodes = self._conn_virt.Msvm_NumaNode()
        nodes_info = []
        for node in numa_nodes:
            related_info = node.associators()

            memory_info = self._get_numa_memory_info(related_info)
            if not memory_info:
                LOG.warning(_LW("Could not find memory information for NUMA "
                                "node. Skipping node measurements."))
                continue

            cpu_info = self._get_numa_cpu_info(related_info)
            if not cpu_info:
                LOG.warning(_LW("Could not find CPU information for NUMA "
                                "node. Skipping node measurements."))
                continue

            node_info = {
                # NodeID has the format: Microsoft:PhysicalNode\<NODE_ID>
                'id': node.NodeID.split('\\')[-1],

                # The memory block size is 1 MB.
                'memory': memory_info.NumberOfBlocks,
                'memory_usage': node.CurrentlyConsumableMemoryBlocks,

                # DeviceID has the format: Microsoft:UUID\0\<DEV_ID>
                'cpuset': set([c.DeviceID.split('\\')[-1] for c in cpu_info]),
                # cpu_usage could be computed here; each CPU exposes a
                # "LoadPercentage" property.
                'cpu_usage': 0,
            }

            nodes_info.append(node_info)

        return nodes_info
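
A minimal sketch of the two helpers used above, assuming they simply filter the WMI association list by class name; the Msvm_Memory and Msvm_Processor class names come from the Hyper-V WMI schema, and the exact filtering is an assumption rather than the driver's confirmed implementation:

    def _get_numa_memory_info(self, related_info):
        # Hypothetical helper: pick the associated Msvm_Memory instance.
        memory_info = [x for x in related_info
                       if x.path_().Class == 'Msvm_Memory']
        if memory_info:
            return memory_info[0]

    def _get_numa_cpu_info(self, related_info):
        # Hypothetical helper: collect the associated Msvm_Processor
        # instances; an empty list is falsy, which the caller relies on.
        return [x for x in related_info
                if x.path_().Class == 'Msvm_Processor']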
Example #2
    def parse_disk_qos_specs(self, qos_specs):
        total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
        min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))

        total_iops = int(qos_specs.get('total_iops_sec',
                                       self._bytes_per_sec_to_iops(
                                           total_bytes_sec)))
        min_iops = int(qos_specs.get('min_iops_sec',
                                     self._bytes_per_sec_to_iops(
                                         min_bytes_sec)))

        if total_iops and total_iops < min_iops:
            err_msg = (_("Invalid QoS specs: minimum IOPS cannot be greater "
                         "than maximum IOPS. "
                         "Requested minimum IOPS: %(min_iops)s "
                         "Requested maximum IOPS: %(total_iops)s.") %
                       {'min_iops': min_iops,
                        'total_iops': total_iops})
            raise exception.Invalid(err_msg)

        unsupported_specs = [spec for spec in qos_specs if
                             spec not in self._SUPPORTED_QOS_SPECS]
        if unsupported_specs:
            LOG.warning(_LW('Ignoring unsupported QoS specs: '
                            '%(unsupported_specs)s. '
                            'Supported QoS specs: %(supported_qos_specs)s'),
                        {'unsupported_specs': unsupported_specs,
                         'supported_qos_specs': self._SUPPORTED_QOS_SPECS})

        return min_iops, total_iops
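
The fallback above converts a bytes/sec value into IOPS through a helper that is not shown. A sketch under the assumption that Hyper-V counts IOPS in normalized 8 KB increments (both the constant and the ceiling division are assumptions):

    _IOPS_BASE_SIZE = 8 * 1024  # assumed normalized I/O size, in bytes

    def _bytes_per_sec_to_iops(self, no_bytes):
        # Round up so that any nonzero bytes/sec value maps to at least
        # one normalized IOPS unit.
        return (no_bytes + self._IOPS_BASE_SIZE - 1) // self._IOPS_BASE_SIZE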
Example #3
    def parse_disk_qos_specs(self, qos_specs):
        total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
        min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))

        total_iops = int(
            qos_specs.get('total_iops_sec',
                          self._bytes_per_sec_to_iops(total_bytes_sec)))
        min_iops = int(
            qos_specs.get('min_iops_sec',
                          self._bytes_per_sec_to_iops(min_bytes_sec)))

        if total_iops and total_iops < min_iops:
            err_msg = (_("Invalid QoS specs: minimum IOPS cannot be greater "
                         "than maximum IOPS. "
                         "Requested minimum IOPS: %(min_iops)s "
                         "Requested maximum IOPS: %(total_iops)s.") % {
                             'min_iops': min_iops,
                             'total_iops': total_iops
                         })
            raise exception.Invalid(err_msg)

        unsupported_specs = [
            spec for spec in qos_specs if spec not in self._SUPPORTED_QOS_SPECS
        ]
        if unsupported_specs:
            LOG.warning(
                _LW('Ignoring unsupported QoS specs: '
                    '%(unsupported_specs)s. '
                    'Supported QoS specs: %(supported_qos_specs)s'), {
                        'unsupported_specs': unsupported_specs,
                        'supported_qos_specs': self._SUPPORTED_QOS_SPECS
                    })

        return min_iops, total_iops
Example #4
    def validate_initiators(self):
        # The MS iSCSI initiator service can manage the software iSCSI
        # initiator as well as hardware initiators.
        initiator_list = CONF.hyperv.iscsi_initiator_list
        valid_initiators = True

        if not initiator_list:
            LOG.info(
                _LI("No iSCSI initiator was explicitly requested. "
                    "The Microsoft iSCSI initiator will choose the "
                    "initiator when estabilishing sessions."))
        else:
            available_initiators = self._iscsi_utils.get_iscsi_initiators()
            for initiator in initiator_list:
                if initiator not in available_initiators:
                    valid_initiators = False
                    msg = _LW("The requested initiator %(req_initiator)s "
                              "is not in the list of available initiators: "
                              "%(avail_initiators)s.")
                    LOG.warning(
                        msg,
                        dict(req_initiator=initiator,
                             avail_initiators=available_initiators))

        return valid_initiators
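
For reference, a hypothetical nova.conf snippet that exercises the else branch above; the initiator names are made up, and real values should come from the names reported by the MS iSCSI initiator service on the host:

    [hyperv]
    iscsi_initiator_list = ROOT\ISCSIPRT\0000_0,ROOT\ISCSIPRT\0001_0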
Example #5
 def set_disk_host_resource(self, vm_name, controller_path, address,
                            mounted_disk_path):
     disk_found = False
     vm = self._lookup_vm_check(vm_name)
     (disk_resources, volume_resources) = self._get_vm_disks(vm)
     for disk_resource in disk_resources + volume_resources:
         if (disk_resource.Parent == controller_path and
                 self._get_disk_resource_address(disk_resource) ==
                 str(address)):
             if (disk_resource.HostResource and
                     disk_resource.HostResource[0] != mounted_disk_path):
                 LOG.debug('Updating disk host resource "%(old)s" to '
                             '"%(new)s"' %
                           {'old': disk_resource.HostResource[0],
                            'new': mounted_disk_path})
                 disk_resource.HostResource = [mounted_disk_path]
                 self._modify_virt_resource(disk_resource, vm.path_())
             disk_found = True
             break
     if not disk_found:
         LOG.warning(_LW('Disk not found on controller '
                         '"%(controller_path)s" with '
                         'address "%(address)s"'),
                     {'controller_path': controller_path,
                      'address': address})
Example #6
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shut down within the time
                    limit, False otherwise.
        """
        LOG.debug("Performing soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance and wait at most
            # `wait_time` seconds for it to power off. If it has not shut
            # down yet, retry until it succeeds or the total time waited
            # reaches the timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except os_win_exc.HyperVException as e:
                # The exception is raised when trying to shut down the
                # instance while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
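
A sketch of how a caller might combine this soft path with a forced power-off once it times out; the _set_vm_state helper and the state constant are assumptions, not the driver's confirmed API:

    def power_off(self, instance, timeout=0, retry_interval=0):
        # Hypothetical caller: attempt a soft shutdown first and fall
        # back to forcing the VM off if it does not finish in time.
        if retry_interval <= 0:
            retry_interval = SHUTDOWN_TIME_INCREMENT
        if timeout and self._soft_shutdown(instance, timeout,
                                           retry_interval):
            return
        self._set_vm_state(instance, constants.HYPERV_VM_STATE_DISABLED)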
Example #7
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shut down within the time
                    limit, False otherwise.
        """
        LOG.debug("Performing soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance and wait at most
            # `wait_time` seconds for it to power off. If it has not shut
            # down yet, retry until it succeeds or the total time waited
            # reaches the timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except vmutils.HyperVException as e:
                # The exception is raised when trying to shut down the
                # instance while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
Example #8
 def set_disk_host_resource(self, vm_name, controller_path, address,
                            mounted_disk_path):
     disk_found = False
     vm = self._lookup_vm_check(vm_name)
     (disk_resources, volume_resources) = self._get_vm_disks(vm)
     for disk_resource in disk_resources + volume_resources:
         if (disk_resource.Parent == controller_path
                 and self._get_disk_resource_address(disk_resource)
                 == str(address)):
             if (disk_resource.HostResource and
                     disk_resource.HostResource[0] != mounted_disk_path):
                 LOG.debug(
                     'Updating disk host resource "%(old)s" to '
                     '"%(new)s"' % {
                         'old': disk_resource.HostResource[0],
                         'new': mounted_disk_path
                     })
                 disk_resource.HostResource = [mounted_disk_path]
                 self._modify_virt_resource(disk_resource, vm.path_())
             disk_found = True
             break
     if not disk_found:
         LOG.warning(
             _LW('Disk not found on controller '
                 '"%(controller_path)s" with '
                 'address "%(address)s"'), {
                     'controller_path': controller_path,
                     'address': address
                 })
Example #9
 def validate_qos_specs(qos_specs, supported_qos_specs):
     unsupported_specs = set(qos_specs.keys()).difference(
         supported_qos_specs)
     if unsupported_specs:
         msg = (_LW('Got unsupported QoS specs: '
                    '%(unsupported_specs)s. '
                    'Supported QoS specs: %(supported_qos_specs)s') %
                {'unsupported_specs': unsupported_specs,
                 'supported_qos_specs': supported_qos_specs})
         LOG.warning(msg)
Example #10
 def validate_qos_specs(qos_specs, supported_qos_specs):
     unsupported_specs = set(
         qos_specs.keys()).difference(supported_qos_specs)
     if unsupported_specs:
         msg = (_LW('Got unsupported QoS specs: '
                    '%(unsupported_specs)s. '
                    'Supported QoS specs: %(supported_qos_specs)s') % {
                        'unsupported_specs': unsupported_specs,
                        'supported_qos_specs': supported_qos_specs
                    })
         LOG.warning(msg)
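
A quick usage example: an unsupported key only produces a warning, so callers can pass the raw spec dict straight through (the key names below are illustrative):

    qos_specs = {'total_bytes_sec': '10485760', 'burst_iops': '500'}
    validate_qos_specs(qos_specs,
                       ['total_bytes_sec', 'min_bytes_sec',
                        'total_iops_sec', 'min_iops_sec'])
    # Logs a warning mentioning the unsupported {'burst_iops'} key.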
Example #11
 def get_volume_connector(self, instance):
     if not self._initiator:
         self._initiator = self._volutils.get_iscsi_initiator()
         if not self._initiator:
             LOG.warning(_LW('Could not determine iscsi initiator name'),
                         instance=instance)
     return {
         'ip': CONF.my_block_storage_ip,
         'host': CONF.host,
         'initiator': self._initiator,
     }
Example #12
 def get_volume_connector(self, instance):
     if not self._initiator:
         self._initiator = self._volutils.get_iscsi_initiator()
         if not self._initiator:
             LOG.warning(_LW('Could not determine iscsi initiator name'),
                         instance=instance)
     return {
         'ip': CONF.my_block_storage_ip,
         'host': CONF.host,
         'initiator': self._initiator,
     }
Example #13
 def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
     disk_resource = self._get_mounted_disk_resource_from_path(
         disk_path, is_physical=False)
     try:
         disk_resource.IOPSLimit = max_iops
         disk_resource.IOPSReservation = min_iops
     except AttributeError:
         LOG.warn(_LW("This Windows version does not support disk QoS. "
                      "Ignoring QoS specs."))
         return
     # VMUtilsV2._modify_virt_resource does not require the vm path.
     self._modify_virt_resource(disk_resource, None)
Example #14
 def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
     disk_resource = self._get_mounted_disk_resource_from_path(
         disk_path, is_physical=False)
     try:
         disk_resource.IOPSLimit = max_iops
         disk_resource.IOPSReservation = min_iops
     except AttributeError:
      LOG.warning(
             _LW("This Windows version does not support disk QoS. "
                 "Ignoring QoS specs."))
         return
     # VMUtilsV2._modify_virt_resource does not require the vm path.
     self._modify_virt_resource(disk_resource, None)
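
A hedged sketch of glue code connecting parse_disk_qos_specs (example #2) with this setter; everything here, including where the specs arrive from, is hypothetical:

 def _apply_volume_qos(self, connection_info, vm_name, disk_path):
     # Hypothetical: Cinder-style QoS specs delivered via the
     # connection info.
     qos_specs = connection_info.get('data', {}).get('qos_specs') or {}
     min_iops, max_iops = self.parse_disk_qos_specs(qos_specs)
     if min_iops or max_iops:
         self._vmutils.set_disk_qos_specs(vm_name, disk_path,
                                          min_iops, max_iops)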
Example #15
    def _check_ephemeral_disks(self,
                               instance,
                               ephemerals,
                               resize_instance=False):
        instance_name = instance.name
        new_eph_gb = instance.get('ephemeral_gb', 0)

        if len(ephemerals) == 1:
            # NOTE(claudiub): Resize only if there is one ephemeral. If there
            # are more than 1, resizing them can be problematic. This behaviour
            # also exists in the libvirt driver and it has to be addressed in
            # the future.
            ephemerals[0]['size'] = new_eph_gb
        elif sum(eph['size'] for eph in ephemerals) != new_eph_gb:
            # New ephemeral size is different from the original ephemeral size
            # and there are multiple ephemerals.
            LOG.warning(_LW("Cannot resize multiple ephemeral disks for "
                            "instance."),
                        instance=instance)

        for index, eph in enumerate(ephemerals):
            eph_name = "eph%s" % index
            existing_eph_path = self._pathutils.lookup_ephemeral_vhd_path(
                instance_name, eph_name)

            if not existing_eph_path:
                eph['format'] = self._vhdutils.get_best_supported_vhd_format()
                eph['path'] = self._pathutils.get_ephemeral_vhd_path(
                    instance_name, eph['format'], eph_name)
                if not resize_instance:
                    # When not resizing, the ephemeral disks should have
                    # already existed; treat their absence as an error.
                    raise exception.DiskNotFound(location=eph['path'])

                if eph['size']:
                    # create ephemerals
                    self._vmops.create_ephemeral_disk(instance.name, eph)
            elif eph['size'] > 0:
                # ephemerals exist. resize them.
                eph['path'] = existing_eph_path
                eph_vhd_info = self._vhdutils.get_vhd_info(eph['path'])
                self._check_resize_vhd(eph['path'], eph_vhd_info,
                                       eph['size'] * units.Gi)
            else:
                # ephemeral new size is 0, remove it.
                self._pathutils.remove(existing_eph_path)
                eph['path'] = None
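
The _check_resize_vhd helper is not shown; a sketch under assumptions (that the VHD info dict exposes 'MaxInternalSize', that shrinking is rejected, and that growing delegates to the VHD utils):

    def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
        curr_size = vhd_info['MaxInternalSize']
        if new_size < curr_size:
            # Assumption: shrinking an ephemeral disk is not supported.
            raise exception.CannotResizeDisk(
                reason=_("Cannot resize the disk below its current "
                         "size: %s") % curr_size)
        if new_size > curr_size:
            self._vhdutils.resize_vhd(vhd_path, new_size)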
Example #16
    def wait_vif_plug_events(self, instance, network_info):
        timeout = CONF.vif_plugging_timeout
        if utils.is_neutron():
            events = self._get_neutron_events(network_info)
        else:
            events = []

        try:
            with self._virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                yield
        except etimeout.Timeout:
            # We never heard from Neutron
            LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                            'instance.'), instance=instance)
            if CONF.vif_plugging_is_fatal:
                raise exception.VirtualInterfaceCreateException()
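
Since the method yields inside the with block, it is presumably decorated with @contextlib.contextmanager at its definition site. The error callback is not shown either; a sketch consistent with how it is passed in (the _LE marker and the duplicated fatal check are assumptions):

    def _neutron_failed_callback(self, event_name, instance):
        # Hypothetical callback: log the failed event and bail out
        # early when VIF plugging failures are configured as fatal.
        LOG.error(_LE('Neutron reported failure on event %s'),
                  event_name, instance=instance)
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()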
Example #17
    def wait_vif_plug_events(self, instance, network_info):
        timeout = CONF.vif_plugging_timeout
        if utils.is_neutron():
            events = self._get_neutron_events(network_info)
        else:
            events = []

        try:
            with self._virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                yield
        except etimeout.Timeout:
            # We never heard from Neutron
            LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                            'instance.'), instance=instance)
            if CONF.vif_plugging_is_fatal:
                raise exception.VirtualInterfaceCreateException()
Example #18
    def _check_ephemeral_disks(self, instance, ephemerals,
                               resize_instance=False):
        instance_name = instance.name
        new_eph_gb = instance.get('ephemeral_gb', 0)

        if len(ephemerals) == 1:
            # NOTE(claudiub): Resize only if there is one ephemeral. If there
            # are more than 1, resizing them can be problematic. This behaviour
            # also exists in the libvirt driver and it has to be addressed in
            # the future.
            ephemerals[0]['size'] = new_eph_gb
        elif sum(eph['size'] for eph in ephemerals) != new_eph_gb:
            # New ephemeral size is different from the original ephemeral size
            # and there are multiple ephemerals.
            LOG.warning(_LW("Cannot resize multiple ephemeral disks for "
                            "instance."), instance=instance)

        for index, eph in enumerate(ephemerals):
            eph_name = "eph%s" % index
            existing_eph_path = self._pathutils.lookup_ephemeral_vhd_path(
                instance_name, eph_name)

            if not existing_eph_path:
                eph['format'] = self._vhdutils.get_best_supported_vhd_format()
                eph['path'] = self._pathutils.get_ephemeral_vhd_path(
                    instance_name, eph['format'], eph_name)
                if not resize_instance:
                    # When not resizing, the ephemeral disks should have
                    # already existed; treat their absence as an error.
                    raise exception.DiskNotFound(location=eph['path'])

                if eph['size']:
                    # create ephemerals
                    self._vmops.create_ephemeral_disk(instance.name, eph)
            elif eph['size'] > 0:
                # ephemerals exist. resize them.
                eph['path'] = existing_eph_path
                eph_vhd_info = self._vhdutils.get_vhd_info(eph['path'])
                self._check_resize_vhd(
                    eph['path'], eph_vhd_info, eph['size'] * units.Gi)
            else:
                # ephemeral new size is 0, remove it.
                self._pathutils.remove(existing_eph_path)
                eph['path'] = None
Example #19
 def set_disk_host_resource(self, vm_name, controller_path, address, mounted_disk_path):
     disk_found = False
     vmsettings = self._lookup_vm_check(vm_name)
     (disk_resources, volume_resources) = self._get_vm_disks(vmsettings)
     for disk_resource in disk_resources + volume_resources:
         if disk_resource.Parent == controller_path and self._get_disk_resource_address(disk_resource) == str(
             address
         ):
             if disk_resource.HostResource and disk_resource.HostResource[0] != mounted_disk_path:
                 LOG.debug(
                     'Updating disk host resource "%(old)s" to '
                     '"%(new)s"' % {"old": disk_resource.HostResource[0], "new": mounted_disk_path}
                 )
                 disk_resource.HostResource = [mounted_disk_path]
                 self._modify_virt_resource(disk_resource, vmsettings.path_())
             disk_found = True
             break
     if not disk_found:
         LOG.warning(
             _LW("Disk not found on controller " '"%(controller_path)s" with ' 'address "%(address)s"'),
             {"controller_path": controller_path, "address": address},
         )
Example #20
    def validate_initiators(self):
        # The MS iSCSI initiator service can manage the software iSCSI
        # initiator as well as hardware initiators.
        initiator_list = CONF.hyperv.iscsi_initiator_list
        valid_initiators = True

        if not initiator_list:
            LOG.info(_LI("No iSCSI initiator was explicitly requested. "
                         "The Microsoft iSCSI initiator will choose the "
                         "initiator when estabilishing sessions."))
        else:
            available_initiators = self._iscsi_utils.get_iscsi_initiators()
            for initiator in initiator_list:
                if initiator not in available_initiators:
                    valid_initiators = False
                    msg = _LW("The requested initiator %(req_initiator)s "
                              "is not in the list of available initiators: "
                              "%(avail_initiators)s.")
                    LOG.warning(msg,
                                dict(req_initiator=initiator,
                                     avail_initiators=available_initiators))

        return valid_initiators
Example #21
    def create_instance(self, instance, network_info, root_device,
                        block_device_info, vm_gen, image_meta):
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)

        memory_per_numa_node, cpus_per_numa_node = (
            self._get_instance_vnuma_config(instance, image_meta))

        if memory_per_numa_node:
            if CONF.hyperv.dynamic_memory_ratio > 1.0:
                LOG.warning(_LW(
                    "Instance vNUMA topology requested, but dynamic memory "
                    "ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
                    "memory ratio option."),
                            instance=instance)
            dynamic_memory_ratio = 1.0
            vnuma_enabled = True
        else:
            dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
            vnuma_enabled = False

        self._vmutils.create_vm(instance_name, vnuma_enabled, vm_gen,
                                instance_path, [instance.uuid])

        self._vmutils.update_vm(instance_name, instance.memory_mb,
                                memory_per_numa_node, instance.vcpus,
                                cpus_per_numa_node,
                                CONF.hyperv.limit_cpu_features,
                                dynamic_memory_ratio)

        flavor_extra_specs = instance.flavor.extra_specs
        remote_fx_config = flavor_extra_specs.get(
            constants.FLAVOR_REMOTE_FX_EXTRA_SPEC_KEY)
        if remote_fx_config:
            self._configure_remotefx(instance, vm_gen, remote_fx_config)

        self._vmutils.create_scsi_controller(instance_name)

        self._attach_root_device(instance_name, root_device)
        self._attach_ephemerals(instance_name, block_device_info['ephemerals'])
        self._volumeops.attach_volumes(
            block_device_info['block_device_mapping'], instance_name)

        serial_ports = self._get_image_serial_port_settings(image_meta)
        self._create_vm_com_port_pipes(instance, serial_ports)
        self._set_instance_disk_qos_specs(instance)

        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name, vif['id'], vif['address'])
            vif_driver = self._get_vif_driver(vif.get('type'))
            vif_driver.plug(instance, vif)

        if CONF.hyperv.enable_instance_metrics_collection:
            self._metricsutils.enable_vm_metrics_collection(instance_name)
        secure_boot_enabled = self._requires_secure_boot(
            instance, image_meta, vm_gen)
        if secure_boot_enabled:
            certificate_required = self._requires_certificate(
                instance.uuid, image_meta)
            self._vmutils.enable_secure_boot(instance.name,
                                             certificate_required)
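
The _set_instance_disk_qos_specs call above is not shown; a hypothetical sketch that reads an IOPS cap from the flavor extra specs and applies it to each local disk (the spec key and the _get_instance_local_disks helper are assumptions):

    def _set_instance_disk_qos_specs(self, instance):
        extra_specs = instance.flavor.extra_specs
        total_iops_sec = int(
            extra_specs.get('quota:disk_total_iops_sec') or 0)
        if not total_iops_sec:
            return
        for disk_path in self._get_instance_local_disks(instance.name):
            # Minimum IOPS left at 0: only the cap is enforced here.
            self._vmutils.set_disk_qos_specs(instance.name, disk_path,
                                             0, total_iops_sec)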
Example #22
    def create_instance(self, instance, network_info, root_device,
                        block_device_info, vm_gen, image_meta):
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)

        memory_per_numa_node, cpus_per_numa_node = (
            self._get_instance_vnuma_config(instance, image_meta))

        if memory_per_numa_node:
            if CONF.hyperv.dynamic_memory_ratio > 1.0:
                LOG.warning(_LW(
                    "Instance vNUMA topology requested, but dynamic memory "
                    "ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
                    "memory ratio option."), instance=instance)
            dynamic_memory_ratio = 1.0
            vnuma_enabled = True
        else:
            dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
            vnuma_enabled = False

        self._vmutils.create_vm(instance_name,
                                vnuma_enabled,
                                vm_gen,
                                instance_path,
                                [instance.uuid])

        self._vmutils.update_vm(instance_name,
                                instance.memory_mb,
                                memory_per_numa_node,
                                instance.vcpus,
                                cpus_per_numa_node,
                                CONF.hyperv.limit_cpu_features,
                                dynamic_memory_ratio)

        flavor_extra_specs = instance.flavor.extra_specs
        remote_fx_config = flavor_extra_specs.get(
                constants.FLAVOR_REMOTE_FX_EXTRA_SPEC_KEY)
        if remote_fx_config:
            self._configure_remotefx(instance, vm_gen, remote_fx_config)

        self._vmutils.create_scsi_controller(instance_name)

        self._attach_root_device(instance_name, root_device)
        self._attach_ephemerals(instance_name, block_device_info['ephemerals'])
        self._volumeops.attach_volumes(
            block_device_info['block_device_mapping'], instance_name)

        serial_ports = self._get_image_serial_port_settings(image_meta)
        self._create_vm_com_port_pipes(instance, serial_ports)
        self._set_instance_disk_qos_specs(instance)

        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name,
                                     vif['id'],
                                     vif['address'])
            vif_driver = self._get_vif_driver(vif.get('type'))
            vif_driver.plug(instance, vif)

        if CONF.hyperv.enable_instance_metrics_collection:
            self._metricsutils.enable_vm_metrics_collection(instance_name)
        secure_boot_enabled = self._requires_secure_boot(
            instance, image_meta, vm_gen)
        if secure_boot_enabled:
            certificate_required = self._requires_certificate(instance.uuid,
                                                              image_meta)
            self._vmutils.enable_secure_boot(instance.name,
                                             certificate_required)
Example #23
 def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
     LOG.warn(_LW("The root/virtualization WMI namespace does not "
                  "support QoS. Ignoring QoS specs."))
Example #24
 def set_disk_qos_specs(self, connection_info, min_iops, max_iops):
     volume_type = connection_info.get('driver_volume_type', '')
     LOG.warning(_LW("The %s Hyper-V volume driver does not support QoS. "
                     "Ignoring QoS specs."), volume_type)
Example #25
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        instance_name = instance.name

        LOG.debug("Creating snapshot for instance %s", instance_name)
        snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        export_dir = None

        try:
            src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)

            LOG.debug("Getting info for VHD %s", src_vhd_path)
            src_base_disk_path = self._vhdutils.get_vhd_parent_path(
                src_vhd_path)

            export_dir = self._pathutils.get_export_dir(instance_name)

            dest_vhd_path = os.path.join(export_dir,
                                         os.path.basename(src_vhd_path))
            LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s', {
                'src_vhd_path': src_vhd_path,
                'dest_vhd_path': dest_vhd_path
            })
            self._pathutils.copyfile(src_vhd_path, dest_vhd_path)

            image_vhd_path = None
            if not src_base_disk_path:
                image_vhd_path = dest_vhd_path
            else:
                basename = os.path.basename(src_base_disk_path)
                dest_base_disk_path = os.path.join(export_dir, basename)
                LOG.debug(
                    'Copying base disk %(src_vhd_path)s to '
                    '%(dest_base_disk_path)s', {
                        'src_vhd_path': src_vhd_path,
                        'dest_base_disk_path': dest_base_disk_path
                    })
                self._pathutils.copyfile(src_base_disk_path,
                                         dest_base_disk_path)

                LOG.debug(
                    "Reconnecting copied base VHD "
                    "%(dest_base_disk_path)s and diff "
                    "VHD %(dest_vhd_path)s", {
                        'dest_base_disk_path': dest_base_disk_path,
                        'dest_vhd_path': dest_vhd_path
                    })
                self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
                                                    dest_base_disk_path)

                LOG.debug(
                    "Merging base disk %(dest_base_disk_path)s and "
                    "diff disk %(dest_vhd_path)s", {
                        'dest_base_disk_path': dest_base_disk_path,
                        'dest_vhd_path': dest_vhd_path
                    })
                self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
                image_vhd_path = dest_base_disk_path

            LOG.debug(
                "Updating Glance image %(image_id)s with content from "
                "merged disk %(image_vhd_path)s", {
                    'image_id': image_id,
                    'image_vhd_path': image_vhd_path
                })
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            self._save_glance_image(context, image_id, image_vhd_path)

            LOG.debug(
                "Snapshot image %(image_id)s updated for VM "
                "%(instance_name)s", {
                    'image_id': image_id,
                    'instance_name': instance_name
                })
        finally:
            try:
                LOG.debug("Removing snapshot %s", image_id)
                self._vmutils.remove_vm_snapshot(snapshot_path)
            except Exception as ex:
                LOG.exception(ex)
                LOG.warning(_LW('Failed to remove snapshot for VM %s'),
                            instance_name)
            if export_dir:
                LOG.debug('Removing directory: %s', export_dir)
                self._pathutils.rmtree(export_dir)
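
The _save_glance_image helper is not shown; a sketch under assumptions (the image service accessor and the metadata keys are assumptions, not confirmed API):

    def _save_glance_image(self, context, image_id, image_vhd_path):
        image_metadata = {'disk_format': 'vhd',
                          'container_format': 'bare'}
        # Stream the merged VHD into the existing Glance image record.
        with self._pathutils.open(image_vhd_path, 'rb') as f:
            image_service = glance.get_default_image_service()
            image_service.update(context, image_id, image_metadata, f)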
Example #26
 def set_disk_qos_specs(self, connection_info, instance_name,
                        min_iops, max_iops):
     LOG.warn(_LW("The iSCSI Hyper-V volume driver does not support QoS. "
                  "Ignoring QoS specs."))
Example #27
 def set_disk_qos_specs(self, connection_info, min_iops, max_iops):
     volume_type = connection_info.get('driver_volume_type', '')
     LOG.warning(
         _LW("The %s Hyper-V volume driver does not support QoS. "
             "Ignoring QoS specs."), volume_type)
Example #28
 def set_disk_qos_specs(self, connection_info, instance_name,
                        min_iops, max_iops):
     LOG.warn(_LW("The iSCSI Hyper-V volume driver does not support QoS. "
                  "Ignoring QoS specs."))
Example #29
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        instance_name = instance.name

        LOG.debug("Creating snapshot for instance %s", instance_name)
        snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        export_dir = None

        try:
            src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)

            LOG.debug("Getting info for VHD %s", src_vhd_path)
            src_base_disk_path = self._vhdutils.get_vhd_parent_path(
                src_vhd_path)

            export_dir = self._pathutils.get_export_dir(instance_name)

            dest_vhd_path = os.path.join(export_dir, os.path.basename(
                src_vhd_path))
            LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
                      {'src_vhd_path': src_vhd_path,
                       'dest_vhd_path': dest_vhd_path})
            self._pathutils.copyfile(src_vhd_path, dest_vhd_path)

            image_vhd_path = None
            if not src_base_disk_path:
                image_vhd_path = dest_vhd_path
            else:
                basename = os.path.basename(src_base_disk_path)
                dest_base_disk_path = os.path.join(export_dir, basename)
                LOG.debug('Copying base disk %(src_vhd_path)s to '
                          '%(dest_base_disk_path)s',
                          {'src_vhd_path': src_vhd_path,
                           'dest_base_disk_path': dest_base_disk_path})
                self._pathutils.copyfile(src_base_disk_path,
                                         dest_base_disk_path)

                LOG.debug("Reconnecting copied base VHD "
                          "%(dest_base_disk_path)s and diff "
                          "VHD %(dest_vhd_path)s",
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
                                                    dest_base_disk_path)

                LOG.debug("Merging base disk %(dest_base_disk_path)s and "
                          "diff disk %(dest_vhd_path)s",
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
                image_vhd_path = dest_base_disk_path

            LOG.debug("Updating Glance image %(image_id)s with content from "
                      "merged disk %(image_vhd_path)s",
                      {'image_id': image_id, 'image_vhd_path': image_vhd_path})
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            self._save_glance_image(context, image_id, image_vhd_path)

            LOG.debug("Snapshot image %(image_id)s updated for VM "
                      "%(instance_name)s",
                      {'image_id': image_id, 'instance_name': instance_name})
        finally:
            try:
                LOG.debug("Removing snapshot %s", image_id)
                self._vmutils.remove_vm_snapshot(snapshot_path)
            except Exception as ex:
                LOG.exception(ex)
                LOG.warning(_LW('Failed to remove snapshot for VM %s'),
                            instance_name)
            if export_dir:
                LOG.debug('Removing directory: %s', export_dir)
                self._pathutils.rmtree(export_dir)
Example #30
 def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
      LOG.warning(
         _LW("The root/virtualization WMI namespace does not "
             "support QoS. Ignoring QoS specs."))