def host_passes(self, host_state, filter_properties):
        ram_ratio = CONF.ram_allocation_ratio
        cpu_ratio = CONF.cpu_allocation_ratio
        instance = filter_properties.get('instance_properties', {})
        instance_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        if instance_topology:
            if host_topology:
                if not hardware.VirtNUMAHostTopology.can_fit_instances(
                        host_topology, [instance_topology]):
                    return False

                limit_cells = []
                usage_after_instance = (
                        hardware.VirtNUMAHostTopology.usage_from_instances(
                            host_topology, [instance_topology]))
                for cell in usage_after_instance.cells:
                    max_cell_memory = int(cell.memory * ram_ratio)
                    max_cell_cpu = len(cell.cpuset) * cpu_ratio
                    if (cell.memory_usage > max_cell_memory or
                            cell.cpu_usage > max_cell_cpu):
                        return False
                    limit_cells.append(
                        hardware.VirtNUMATopologyCellLimit(
                            cell.id, cell.cpuset, cell.memory,
                            max_cell_cpu, max_cell_memory))
                host_state.limits['numa_topology'] = (
                        hardware.VirtNUMALimitTopology(
                            cells=limit_cells).to_json())
                return True
            else:
                return False
        else:
            return True
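A minimal standalone sketch of the per-cell arithmetic used above (illustrative names, not Nova's API): each NUMA cell's raw capacity is scaled by the allocation ratios, and the host is rejected if the projected usage exceeds either scaled limit. The ratio defaults below are Nova's historical defaults (cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5).

    from collections import namedtuple

    Cell = namedtuple('Cell', ['id', 'cpuset', 'memory', 'cpu_usage', 'memory_usage'])

    def cell_within_limits(cell, cpu_ratio=16.0, ram_ratio=1.5):
        # Scale the raw cell capacity by the overcommit ratios, as the
        # filter does, then compare against the projected usage.
        max_cell_memory = int(cell.memory * ram_ratio)
        max_cell_cpu = len(cell.cpuset) * cpu_ratio
        return (cell.memory_usage <= max_cell_memory and
                cell.cpu_usage <= max_cell_cpu)

    # A cell with 4 pCPUs and 2048 MB that already carries some usage:
    cell = Cell(id=0, cpuset={0, 1, 2, 3}, memory=2048, cpu_usage=8, memory_usage=1024)
    assert cell_within_limits(cell)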
Example 2
 def host_passes(self, host_state, filter_properties):
     ram_ratio = CONF.ram_allocation_ratio
     cpu_ratio = CONF.cpu_allocation_ratio
     instance = filter_properties.get('instance_properties', {})
     instance_topology = hardware.instance_topology_from_instance(instance)
     if instance_topology:
         if host_state.numa_topology:
             limit_cells = []
             usage_after_instance = (
                     hardware.get_host_numa_usage_from_instance(
                         host_state, instance, never_serialize_result=True))
             for cell in usage_after_instance.cells:
                 max_cell_memory = int(cell.memory * ram_ratio)
                 max_cell_cpu = len(cell.cpuset) * cpu_ratio
                 if (cell.memory_usage > max_cell_memory or
                         cell.cpu_usage > max_cell_cpu):
                     return False
                 limit_cells.append(
                     hardware.VirtNUMATopologyCellLimit(
                         cell.id, cell.cpuset, cell.memory,
                         max_cell_cpu, max_cell_memory))
             host_state.limits['numa_topology'] = (
                     hardware.VirtNUMALimitTopology(
                         cells=limit_cells).to_json())
             return True
         else:
             return False
     else:
         return True
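Example 3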
 def host_passes(self, host_state, filter_properties):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     pci_requests = instance.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(
             cpu_allocation_ratio=cpu_ratio,
             ram_allocation_ratio=ram_ratio)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits=limits,
                     pci_requests=pci_requests,
                     pci_stats=host_state.pci_stats))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits
         return True
     elif requested_topology:
         return False
     else:
         return True
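This variant delegates all per-cell checking to hardware.numa_fit_instance_to_host, which returns a fitted instance topology on success and None on failure. A standalone sketch of that fit-or-reject contract, as a greedy first-fit over hypothetical (cpus, mem) cell tuples, not Nova's actual placement algorithm:

    def numa_fit(host_cells, requested_cells):
        # Greedy first-fit of requested (cpus, mem) tuples onto host cells.
        # Returns the chosen placement, or None when any request cannot
        # fit -- the same success/None contract the filter relies on.
        placement = []
        free = [{'cpus': c, 'mem': m} for c, m in host_cells]
        for cpus, mem in requested_cells:
            for i, cell in enumerate(free):
                if cell['cpus'] >= cpus and cell['mem'] >= mem:
                    cell['cpus'] -= cpus
                    cell['mem'] -= mem
                    placement.append(i)
                    break
            else:
                return None
        return placement

    assert numa_fit([(4, 2048), (4, 2048)], [(2, 1024)]) == [0]
    assert numa_fit([(2, 1024)], [(4, 2048)]) is None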
Example 4
 def host_passes(self, host_state, filter_properties):
     ram_ratio = CONF.ram_allocation_ratio
     cpu_ratio = CONF.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     if requested_topology and host_topology:
         limit_cells = []
         for cell in host_topology.cells:
             max_cell_memory = int(cell.memory * ram_ratio)
             max_cell_cpu = len(cell.cpuset) * cpu_ratio
             limit_cells.append(hardware.VirtNUMATopologyCellLimit(
                 cell.id, cell.cpuset, cell.memory,
                 max_cell_cpu, max_cell_memory))
         limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits_topology=limits))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits.to_json()
         instance['numa_topology'] = instance_topology
         return True
     elif requested_topology:
         return False
     else:
         return True
Example 5
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance["root_gb"] + instance["ephemeral_gb"]) * 1024
        ram_mb = instance["memory_mb"]
        vcpus = instance["vcpus"]
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get("pci_requests")
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests["requests"] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests.from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(self)
        instance_numa_topology = hardware.instance_topology_from_instance(instance)

        instance["numa_topology"] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get("numa_topology"),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats,
        )
        if pci_requests:
            instance_cells = None
            if instance["numa_topology"]:
                instance_cells = instance["numa_topology"].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(self, instance)

        vm_state = instance.get("vm_state", vm_states.BUILDING)
        task_state = instance.get("task_state")
        if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING,
            task_states.REBUILDING,
            task_states.RESIZE_PREP,
            task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP,
            task_states.UNSHELVING,
            task_states.RESCUING,
        ]:
            self.num_io_ops += 1
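Setting aside the NUMA and PCI handling, consume_from_instance is plain delta accounting against the host totals. A reduced sketch with hypothetical names, keeping the same unit convention as above (flavors carry GB, host state carries MB):

    class MiniHostState:
        def __init__(self, free_ram_mb, free_disk_mb):
            self.free_ram_mb = free_ram_mb
            self.free_disk_mb = free_disk_mb
            self.vcpus_used = 0
            self.num_instances = 0

        def consume(self, instance):
            # Root and ephemeral disk sizes are in GB; host state is in MB.
            disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
            self.free_ram_mb -= instance['memory_mb']
            self.free_disk_mb -= disk_mb
            self.vcpus_used += instance['vcpus']
            self.num_instances += 1

    host = MiniHostState(free_ram_mb=8192, free_disk_mb=100 * 1024)
    host.consume({'root_gb': 10, 'ephemeral_gb': 0, 'memory_mb': 2048, 'vcpus': 2})
    assert host.free_ram_mb == 6144 and host.vcpus_used == 2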
Example 6
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example 7
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. Thus, it has a .pci_requests field, which gets converted
        # to a primitive early on, and is thus a dict here. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = pci_requests.requests
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example 8
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. Thus, it has a .pci_requests field, which gets converted
        # to a primitive early on, and is thus a dict here. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            self.pci_stats.apply_requests(pci_requests.requests,
                                          instance_cells)

        # Calculate the numa usage
        instance['numa_topology'] = instance_numa_topology
        updated_numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)
        self.numa_topology = updated_numa_topology

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING
        ]:
            self.num_io_ops += 1
Example 9
    def _update_instance_topology(self, instance, chosen_host):
        if not instance.get('numa_topology'):
            return

        numa_limits = chosen_host.obj.limits.get('numa_topology', {})
        if isinstance(numa_limits, six.string_types):
            limits = hardware.VirtNUMALimitTopology.from_json(numa_limits)
        else:
            limits = hardware.VirtNUMALimitTopology(numa_limits.get('cells'))

        host_topology, __ = hardware.host_topology_and_format_from_host(
            chosen_host.obj)
        instance_topology = hardware.instance_topology_from_instance(instance)
        instance_topology = (hardware.numa_fit_instance_to_host(
            host_topology, instance_topology, limits_topology=limits))
        if instance_topology:
            instance['numa_topology'] = instance_topology
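The isinstance() branch above exists because 'numa_topology' in the host limits may arrive either as a JSON string (from filters that call .to_json()) or as an already-decoded dict. A standalone sketch of that normalization (using a plain str check rather than six.string_types, for brevity):

    import json

    def load_limit_cells(numa_limits):
        # Accept either a JSON-encoded string or a decoded dict primitive.
        if isinstance(numa_limits, str):
            numa_limits = json.loads(numa_limits)
        return numa_limits.get('cells', [])

    assert load_limit_cells('{"cells": [1, 2]}') == [1, 2]
    assert load_limit_cells({'cells': [1, 2]}) == [1, 2]
    assert load_limit_cells({}) == []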
Example 10
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_instance() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example 11
    def update_pci_for_migration(self, context, instance, sign=1):
        """Update instance's pci usage information when it is migrated.

        The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.

        :param sign: claim devices for instance when sign is 1, remove
                     the claims when sign is -1
        """
        uuid = instance['uuid']
        pci_requests = objects.InstancePCIRequests.get_by_instance(
                context, instance)
        instance_numa_topology = hardware.instance_topology_from_instance(
                instance)
        if sign == 1 and uuid not in self.claims:
            devs = self._claim_instance(context, pci_requests,
                                        instance_numa_topology)
            if devs:
                self.claims[uuid] = devs
        if sign == -1 and uuid in self.claims:
            self._free_instance(instance)
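The sign convention keeps migration updates idempotent: claims are keyed by instance uuid, so a repeated sign=1 call cannot double-claim and sign=-1 frees only what was actually claimed. A reduced sketch of that bookkeeping (hypothetical names):

    class ClaimTracker:
        def __init__(self):
            self.claims = {}   # instance uuid -> claimed devices

        def update_for_migration(self, uuid, devs, sign=1):
            if sign == 1 and uuid not in self.claims:
                self.claims[uuid] = devs          # claim at most once
            if sign == -1 and uuid in self.claims:
                self.claims.pop(uuid)             # free at most once

    tracker = ClaimTracker()
    tracker.update_for_migration('uuid-1', ['dev0'], sign=1)
    tracker.update_for_migration('uuid-1', ['dev1'], sign=1)  # no-op: already claimed
    assert tracker.claims['uuid-1'] == ['dev0']
    tracker.update_for_migration('uuid-1', None, sign=-1)
    assert 'uuid-1' not in tracker.claims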
Example 16
 def host_passes(self, host_state, filter_properties):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     pci_requests = instance.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(
             cpu_allocation_ratio=cpu_ratio,
             ram_allocation_ratio=ram_ratio)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits=limits,
                     pci_requests=pci_requests,
                     pci_stats=host_state.pci_stats))
         if not instance_topology:
             LOG.debug("%(host)s, %(node)s fails NUMA topology "
                       "requirements. The instance does not fit on this "
                       "host.", {'host': host_state.host,
                                 'node': host_state.nodename},
                       instance_uuid=instance.get('instance_uuid'))
             return False
         host_state.limits['numa_topology'] = limits
         return True
     elif requested_topology:
         LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                   "No host NUMA topology while the instance specified "
                   "one.",
                   {'host': host_state.host, 'node': host_state.nodename},
                   instance_uuid=instance.get('instance_uuid'))
         return False
     else:
         return True
Example 17
    def _claim_instance(self, context, instance, prefix=''):
        pci_requests = objects.InstancePCIRequests.get_by_instance(
            context, instance)
        if not pci_requests.requests:
            return None
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        devs = self.stats.consume_requests(pci_requests.requests,
                                           instance_cells)
        if not devs:
            raise exception.PciDeviceRequestFailed(pci_requests)
        for dev in devs:
            device.claim(dev, instance)
        if instance_numa_topology and any(
                                        dev.numa_node is None for dev in devs):
            LOG.warning(_LW("Assigning a pci device without numa affinity to "
                            "instance %(instance)s which has numa topology"),
                        {'instance': instance['uuid']})
        return devs
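The warning above fires when the instance requested NUMA placement but at least one claimed PCI device reports no NUMA node. A minimal sketch of just that check (illustrative device type, not Nova's PciDevice object):

    from collections import namedtuple

    Dev = namedtuple('Dev', ['address', 'numa_node'])

    def missing_numa_affinity(devs, has_instance_topology):
        # True when NUMA placement was requested but some claimed device
        # has no known NUMA node -- the condition logged above.
        return has_instance_topology and any(d.numa_node is None for d in devs)

    devs = [Dev('0000:81:00.1', 1), Dev('0000:82:00.1', None)]
    assert missing_numa_affinity(devs, has_instance_topology=True)
    assert not missing_numa_affinity(devs, has_instance_topology=False)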
Example 18
 def host_passes(self, host_state, filter_properties):
     ram_ratio = CONF.ram_allocation_ratio
     cpu_ratio = CONF.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
         host_state)
     pci_requests = filter_properties.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limit_cells = []
         for cell in host_topology.cells:
             max_cell_memory = int(cell.memory * ram_ratio)
             max_cell_cpu = len(cell.cpuset) * cpu_ratio
             limit_cells.append(
                 hardware.VirtNUMATopologyCellLimit(cell.id, cell.cpuset,
                                                    cell.memory,
                                                    max_cell_cpu,
                                                    max_cell_memory))
         limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
         instance_topology = (hardware.numa_fit_instance_to_host(
             host_topology,
             requested_topology,
             limits_topology=limits,
             pci_requests=pci_requests,
             pci_stats=host_state.pci_stats))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits.to_json()
         instance['numa_topology'] = instance_topology
         return True
     elif requested_topology:
         return False
     else:
         return True
Example 19
    def _claim_instance(self, context, instance, prefix=''):
        pci_requests = objects.InstancePCIRequests.get_by_instance(
            context, instance)
        if not pci_requests.requests:
            return None
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        devs = self.stats.consume_requests(pci_requests.requests,
                                           instance_cells)
        if not devs:
            return None

        for dev in devs:
            dev.claim(instance)
        if instance_numa_topology and any(
                                        dev.numa_node is None for dev in devs):
            LOG.warning(_LW("Assigning a pci device without numa affinity to "
                            "instance %(instance)s which has numa topology"),
                        {'instance': instance['uuid']})
        return devs
Example 20
 def _from_instance_numa_topology(self, numa_topology):
     if isinstance(numa_topology, dict):
         self.numa_topology = hardware.instance_topology_from_instance(
             dict(numa_topology=numa_topology))
     else:
         self.numa_topology = numa_topology
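hardware.instance_topology_from_instance expects something instance-shaped, so a bare numa_topology primitive is first wrapped into a one-key dict. A standalone sketch of the same normalization idiom, with a stand-in for the hardware helper:

    def fake_topology_from_instance(instance):
        # Stand-in for hardware.instance_topology_from_instance.
        return instance.get('numa_topology')

    def normalize_topology(numa_topology):
        # A dict means we still hold a primitive: wrap it instance-shaped,
        # exactly as the code above does; anything else is already an object.
        if isinstance(numa_topology, dict):
            return fake_topology_from_instance(dict(numa_topology=numa_topology))
        return numa_topology

    assert normalize_topology({'cells': []}) == {'cells': []}
    assert normalize_topology('already-an-object') == 'already-an-object'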
Example 21
    def __init__(self,
                 instance,
                 address=None,
                 content=None,
                 extra_md=None,
                 network_info=None,
                 vd_driver=None,
                 network_metadata=None,
                 request_context=None):
        """Creation of this object should basically cover all time consuming
        collection.  Methods after that should not cause time delays due to
        network operations or lengthy cpu operations.

        The user should then get a single instance and make multiple method
        calls on it.
        """
        if not content:
            content = []

        ctxt = context.get_admin_context()

        # NOTE(danms): Sanitize the instance to limit the amount of stuff
        # inside that may not pickle well (i.e. context). We also touch
        # some of the things we'll lazy load later to make sure we keep their
        # values in what we cache.
        instance.ec2_ids
        instance.keypairs
        instance.device_metadata
        instance.numa_topology
        instance = objects.Instance.obj_from_primitive(
            instance.obj_to_primitive())

        # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
        self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
        self.instance = instance
        self.extra_md = extra_md

        self.availability_zone = instance.get('availability_zone')

        secgroup_api = openstack_driver.get_openstack_security_group_driver()
        self.security_groups = secgroup_api.get_instance_security_groups(
            ctxt, instance)

        self.mappings = _format_instance_mapping(ctxt, instance)

        if instance.user_data is not None:
            self.userdata_raw = base64.decode_as_bytes(instance.user_data)
        else:
            self.userdata_raw = None

        self.address = address

        # expose instance metadata.
        self.launch_metadata = utils.instance_meta(instance)

        self.password = password.extract_password(instance)

        self.uuid = instance.uuid

        self.content = {}
        self.files = []

        # get network info, and the rendered network template
        if network_info is None:
            network_info = instance.info_cache.network_info

        # expose network metadata
        if network_metadata is None:
            self.network_metadata = netutils.get_network_metadata(network_info)
        else:
            self.network_metadata = network_metadata

        self.ip_info = \
                ec2utils.get_ip_info_for_instance_from_nw_info(network_info)

        self.network_config = None
        cfg = netutils.get_injected_network_template(network_info)

        if cfg:
            key = "%04i" % len(self.content)
            self.content[key] = cfg
            self.network_config = {
                "name": "network_config",
                'content_path': "/%s/%s" % (CONTENT_DIR, key)
            }

        # 'content' is passed in from the configdrive code in
        # nova/virt/libvirt/driver.py.  That's how we get the injected files
        # (personalities) in. AFAIK they're not stored in the db at all,
        # so are not available later (web service metadata time).
        for (path, contents) in content:
            key = "%04i" % len(self.content)
            self.files.append({
                'path': path,
                'content_path': "/%s/%s" % (CONTENT_DIR, key)
            })
            self.content[key] = contents

        if vd_driver is None:
            vdclass = importutils.import_class(CONF.vendordata_driver)
        else:
            vdclass = vd_driver

        self.vddriver = vdclass(instance=instance,
                                address=address,
                                extra_md=extra_md,
                                network_info=network_info)

        self.route_configuration = None

        # NOTE(mikal): the decision to not pass extra_md here like we
        # do to the StaticJSON driver is deliberate. extra_md will
        # contain the admin password for the instance, and we shouldn't
        # pass that to external services.
        self.vendordata_providers = {
            'StaticJSON':
            vendordata_json.JsonFileVendorData(instance=instance,
                                               address=address,
                                               extra_md=extra_md,
                                               network_info=network_info),
            'DynamicJSON':
            vendordata_dynamic.DynamicVendorData(instance=instance,
                                                 address=address,
                                                 network_info=network_info,
                                                 context=request_context)
        }

        instance_numa_topology = \
            hardware.instance_topology_from_instance(instance)
        if instance_numa_topology is not None:
            self.offline_cpuset = instance_numa_topology.offline_cpus
        else:
            self.offline_cpuset = set([])
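Injected files and the rendered network template share a single content store: each payload is filed under a zero-padded key derived from the store's current size, and the metadata entry records only the path to it. A reduced sketch of that indexing (the 'content' directory name mirrors the CONTENT_DIR constant assumed above):

    CONTENT_DIR = 'content'
    content = {}
    files = []

    def add_payload(path, payload):
        key = "%04i" % len(content)   # 0000, 0001, ... as in the code above
        content[key] = payload
        files.append({'path': path,
                      'content_path': "/%s/%s" % (CONTENT_DIR, key)})

    add_payload('/etc/motd', b'hello')
    add_payload('/etc/hosts', b'127.0.0.1 localhost')
    assert files[1]['content_path'] == '/content/0001'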
Example 23
    def _test_numa_topology(self, resources, limit):
        host_topology = (resources.numa_topology
                         if 'numa_topology' in resources else None)
        requested_topology = self.numa_topology
        if host_topology:
            # WRS - numa affinity requires extra_specs
            # NOTE(jgauld): Requires the old fix 32558ef to define self.flavor;
            # per the 132eae7 Bug 181 fix, the claim's _test_numa_topology()
            # must look into the destination extra_specs.
            extra_specs = self.flavor.get('extra_specs', {})

            host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            pci_requests = self._pci_requests
            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            # WRS: Support strict vs prefer allocation of PCI devices.
            # If strict fails, fallback to prefer.
            pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                                'strict')
            pci_strict = False if pci_numa_affinity == 'prefer' else True

            details = utils.details_initialize(details=None)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limit,
                pci_requests=pci_requests.requests,
                pci_stats=pci_stats,
                details=details,
                pci_strict=pci_strict))

            if requested_topology and not instance_topology:
                msg = details.get('reason', [])
                LOG.info(
                    '%(class)s: (%(node)s) REJECT: %(desc)s', {
                        'class': self.__class__.__name__,
                        'node': self.nodename,
                        'desc': ', '.join(msg)
                    })
                if pci_requests.requests:
                    return (_("Requested instance NUMA topology together with"
                              " requested PCI devices cannot fit the given"
                              " host NUMA topology"))
                else:
                    return (_("Requested instance NUMA topology cannot fit "
                              "the given host NUMA topology"))
            elif instance_topology:
                # Adjust the claimed pCPUs to handle scaled-down instances
                # Catch exceptions here so that claims will return with
                # fail message when there is an underlying pinning issue.
                try:
                    orig_instance_topology = \
                        hardware.instance_topology_from_instance(self.instance)
                    if orig_instance_topology is not None:
                        offline_cpus = orig_instance_topology.offline_cpus
                        instance_topology.set_cpus_offline(offline_cpus)
                except Exception as e:
                    LOG.error(
                        'Cannot query offline_cpus from requested '
                        'instance NUMA topology, err=%(err)s', {'err': e},
                        self.instance)
                    return (_('Cannot query offline cpus from requested '
                              'instance NUMA topology'))
                self.claimed_numa_topology = instance_topology