Example #1
    def _test_numa_topology(self, resources, limit):
        host_topology = resources.numa_topology if "numa_topology" in resources else None
        requested_topology = self.numa_topology
        if host_topology:
            host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            pci_requests = self._pci_requests
            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            instance_topology = hardware.numa_fit_instance_to_host(
                host_topology, requested_topology, limits=limit, pci_requests=pci_requests.requests, pci_stats=pci_stats
            )

            if requested_topology and not instance_topology:
                if pci_requests.requests:
                    return _(
                        "Requested instance NUMA topology together with"
                        " requested PCI devices cannot fit the given"
                        " host NUMA topology"
                    )
                else:
                    return _("Requested instance NUMA topology cannot fit " "the given host NUMA topology")
            elif instance_topology:
                self.claimed_numa_topology = instance_topology
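
Note: numa_fit_instance_to_host lives in nova.virt.hardware. It takes a host
NUMA topology and a requested instance topology and returns a fitted
objects.InstanceNUMATopology, or None when the request cannot be placed. A
minimal calling sketch follows; it assumes a Nova tree on the import path and
that both topology objects have already been built, as in the examples on this
page (the try_fit helper itself is hypothetical):

    from nova import objects
    from nova.virt import hardware

    def try_fit(host_topology, requested_topology, host_state):
        # Overcommit limits are derived from the host's allocation ratios.
        limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=host_state.cpu_allocation_ratio,
            ram_allocation_ratio=host_state.ram_allocation_ratio)
        # Returns a fitted InstanceNUMATopology, or None if it does not fit.
        return hardware.numa_fit_instance_to_host(
            host_topology, requested_topology,
            limits=limits,
            pci_requests=None,  # no PCI constraints in this sketch
            pci_stats=None)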
Example #2
 def host_passes(self, host_state, filter_properties):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
         host_state)
     pci_requests = instance.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                             ram_allocation_ratio=ram_ratio)
         instance_topology = (hardware.numa_fit_instance_to_host(
             host_topology,
             requested_topology,
             limits=limits,
             pci_requests=pci_requests,
             pci_stats=host_state.pci_stats))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits
         return True
     elif requested_topology:
         return False
     else:
         return True
Example #3
    def _test_numa_topology(self, resources, limit):
        host_topology = resources.get('numa_topology')
        requested_topology = self.numa_topology
        if host_topology:
            host_topology = objects.NUMATopology.obj_from_db_obj(
                    host_topology)
            pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
                                        self.context, self.instance.uuid)

            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            instance_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limit,
                        pci_requests=pci_requests.requests,
                        pci_stats=pci_stats, flavor=self.flavor))

            if requested_topology and not instance_topology:
                if pci_requests.requests:
                    return (_("Requested instance NUMA topology together with"
                              " requested PCI devices cannot fit the given"
                              " host NUMA topology"))
                else:
                    return (_("Requested instance NUMA topology cannot fit "
                          "the given host NUMA topology"))
            elif instance_topology:
                self.claimed_numa_topology = instance_topology
Example #4
    def _test_numa_topology(self, resources, limit):
        host_topology = (resources.numa_topology
                         if 'numa_topology' in resources else None)
        requested_topology = self.numa_topology
        if host_topology:
            host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            pci_requests = self._pci_requests
            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limit,
                pci_requests=pci_requests.requests,
                pci_stats=pci_stats))

            if requested_topology and not instance_topology:
                if pci_requests.requests:
                    return (_("Requested instance NUMA topology together with"
                              " requested PCI devices cannot fit the given"
                              " host NUMA topology"))
                else:
                    return (_("Requested instance NUMA topology cannot fit "
                              "the given host NUMA topology"))
            elif instance_topology:
                self.claimed_numa_topology = instance_topology
Example #5
    def _test_numa_topology(self, resources, limit):
        host_topology = resources.get('numa_topology')
        requested_topology = self.numa_topology
        if host_topology:
            host_topology = objects.NUMATopology.obj_from_db_obj(
                    host_topology)
            pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
                                        self.context, self.instance['uuid'])

            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            instance_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limit,
                        pci_requests=pci_requests.requests,
                        pci_stats=pci_stats))

            if requested_topology and not instance_topology:
                if pci_requests.requests:
                    return (_("Requested instance NUMA topology together with"
                              " requested PCI devices cannot fit the given"
                              " host NUMA topology"))
                else:
                    return (_("Requested instance NUMA topology cannot fit "
                          "the given host NUMA topology"))
            elif instance_topology:
                self.claimed_numa_topology = instance_topology
Example #6
 def host_passes(self, host_state, filter_properties):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     pci_requests = instance.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(
             cpu_allocation_ratio=cpu_ratio,
             ram_allocation_ratio=ram_ratio)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits=limits,
                     pci_requests=pci_requests,
                     pci_stats=host_state.pci_stats))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits
         return True
     elif requested_topology:
         return False
     else:
         return True
Example #7
 def host_passes(self, host_state, filter_properties):
     ram_ratio = CONF.ram_allocation_ratio
     cpu_ratio = CONF.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     if requested_topology and host_topology:
         limit_cells = []
         for cell in host_topology.cells:
             max_cell_memory = int(cell.memory * ram_ratio)
             max_cell_cpu = len(cell.cpuset) * cpu_ratio
             limit_cells.append(hardware.VirtNUMATopologyCellLimit(
                 cell.id, cell.cpuset, cell.memory,
                 max_cell_cpu, max_cell_memory))
         limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits_topology=limits))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits.to_json()
         instance['numa_topology'] = instance_topology
         return True
     elif requested_topology:
         return False
     else:
         return True
Example #8
 def host_passes(self, host_state, filter_properties):
     ram_ratio = CONF.ram_allocation_ratio
     cpu_ratio = CONF.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
         host_state)
     if requested_topology and host_topology:
         limit_cells = []
         for cell in host_topology.cells:
             max_cell_memory = int(cell.memory * ram_ratio)
             max_cell_cpu = len(cell.cpuset) * cpu_ratio
             limit_cells.append(
                 hardware.VirtNUMATopologyCellLimit(cell.id, cell.cpuset,
                                                    cell.memory,
                                                    max_cell_cpu,
                                                    max_cell_memory))
         limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
         instance_topology = (hardware.numa_fit_instance_to_host(
             host_topology, requested_topology, limits_topology=limits))
         if not instance_topology:
             return False
         host_state.limits['numa_topology'] = limits.to_json()
         instance['numa_topology'] = instance_topology
         return True
     elif requested_topology:
         return False
     else:
         return True
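
Examples #7 and #8 show an older signature: per-cell limits are built by hand
as VirtNUMATopologyCellLimit objects, wrapped in a VirtNUMALimitTopology,
passed via the limits_topology= keyword, and serialized with to_json() for the
host claim. Later examples pass an objects.NUMATopologyLimits via limits=
instead. A side-by-side sketch, reusing the variable names from the examples
above:

    # Older signature (examples #7, #8, #14, #22, #24, #27):
    fitted = hardware.numa_fit_instance_to_host(
        host_topology, requested_topology, limits_topology=limits)

    # Newer signature (examples #2, #9, #15 and most others on this page):
    fitted = hardware.numa_fit_instance_to_host(
        host_topology, requested_topology, limits=limits,
        pci_requests=pci_requests, pci_stats=host_state.pci_stats)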
Example #9
    def host_passes(self, host_state, spec_obj):
        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #10
def _pin(instance, compute_node):
    _validate_empty_pinning(instance)
    host_topology = objects.numa.NUMATopology.obj_from_db_obj(
        compute_node.numa_topology)
    pinned = hardware.numa_fit_instance_to_host(host_topology,
                                                instance.numa_topology)
    instance.numa_topology = pinned
    _update_usage(instance, compute_node, free=False)
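
One caveat worth noting about example #10: numa_fit_instance_to_host returns
None when the instance cannot be placed, so _pin as written would silently
null out instance.numa_topology on a failed fit. A more defensive sketch; the
error handling here is an assumption, not taken from the source:

    pinned = hardware.numa_fit_instance_to_host(
        host_topology, instance.numa_topology)
    if pinned is None:
        # Hypothetical error type; use whatever the surrounding code expects.
        raise ValueError("instance NUMA topology does not fit the host")
    instance.numa_topology = pinned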
Example #11
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance["root_gb"] + instance["ephemeral_gb"]) * 1024
        ram_mb = instance["memory_mb"]
        vcpus = instance["vcpus"]
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get("pci_requests")
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests["requests"] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests.from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(self)
        instance_numa_topology = hardware.instance_topology_from_instance(instance)

        instance["numa_topology"] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get("numa_topology"),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats,
        )
        if pci_requests:
            instance_cells = None
            if instance["numa_topology"]:
                instance_cells = instance["numa_topology"].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(self, instance)

        vm_state = instance.get("vm_state", vm_states.BUILDING)
        task_state = instance.get("task_state")
        if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING,
            task_states.REBUILDING,
            task_states.RESIZE_PREP,
            task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP,
            task_states.UNSHELVING,
            task_states.RESCUING,
        ]:
            self.num_io_ops += 1
Example #12
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING
        ]:
            self.num_io_ops += 1
Example #13
    def _update_usage_from_migration(self, context, instance, image_meta, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s") % uuid)

        incoming = migration.dest_compute == self.host and migration.dest_node == self.nodename
        outbound = migration.source_compute == self.host and migration.source_node == self.nodename
        same_node = incoming and outbound

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if instance["instance_type_id"] == migration.old_instance_type_id:
                itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        if image_meta is None:
            image_meta = objects.ImageMeta.from_instance(instance)
        # TODO(jaypipes): Remove when image_meta is always passed
        # as an objects.ImageMeta
        elif not isinstance(image_meta, objects.ImageMeta):
            image_meta = objects.ImageMeta.from_dict(image_meta)

        if itype:
            host_topology = self.compute_node.get("numa_topology")
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = hardware.numa_fit_instance_to_host(host_topology, numa_topology)
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(usage)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                self.compute_node.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                self.compute_node.pci_device_pools = obj
            self.tracked_migrations[uuid] = (migration, itype)
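
In the migration-accounting examples (#13, #20, #32) the requested topology is
rebuilt from the flavor and image constraints rather than read from an
existing claim, and the fit is performed without limits or PCI arguments. The
core of that path, condensed (variable names as in the examples above):

    # Rebuild the requested topology from flavor + image metadata, then
    # fit it against the host; no limits apply on this accounting path.
    numa_topology = hardware.numa_get_constraints(itype, image_meta)
    numa_topology = hardware.numa_fit_instance_to_host(
        host_topology, numa_topology)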
Example #14
 def _test_numa_topology(self, resources, limit):
     host_topology = resources.get('numa_topology')
     requested_topology = self.numa_topology
     if host_topology:
         host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
         instance_topology = (hardware.numa_fit_instance_to_host(
             host_topology, requested_topology, limits_topology=limit))
         if requested_topology and not instance_topology:
             return (_("Requested instance NUMA topology cannot fit "
                       "the given host NUMA topology"))
         elif instance_topology:
             self.claimed_numa_topology = instance_topology
Example #15
    def host_passes(self, host_state, spec_obj):
        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limits,
                        pci_requests=pci_requests,
                        pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug("%(host)s, %(node)s fails NUMA topology "
                          "requirements. The instance does not fit on this "
                          "host.", {'host': host_state.host,
                                    'node': host_state.nodename},
                          instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #16
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from an RequestSpec object."""
        disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still limbo between an NUMATopology
        # object (when updated by consume_from_request), a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #17
    def host_passes(self, host_state, spec_obj):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                flavor=spec_obj.flavor))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #18
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from an RequestSpec object."""
        disk_mb = (spec_obj.root_gb +
                   spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still limbo between an NUMATopology
        # object (when updated by consume_from_request), a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #19
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example #20
    def _update_usage_from_migration(self, context, instance, image_meta, resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s") % uuid)

        incoming = migration.dest_compute == self.host and migration.dest_node == self.nodename
        outbound = migration.source_compute == self.host and migration.source_node == self.nodename
        same_node = incoming and outbound

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if instance["instance_type_id"] == migration.old_instance_type_id:
                itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(instance["system_metadata"])

        if itype:
            host_topology = resources.get("numa_topology")
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = hardware.numa_fit_instance_to_host(host_topology, numa_topology)
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources["pci_device_pools"] = self.pci_tracker.stats
            else:
                resources["pci_device_pools"] = []
            self.tracked_migrations[uuid] = (migration, itype)
Example #21
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. Thus, it has a .pci_requests field, which gets converted
        # to a primitive early on, and is thus a dict here. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = pci_requests.requests
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example #22
 def _test_numa_topology(self, resources, limit):
     host_topology = resources.get('numa_topology')
     requested_topology = self.numa_topology
     if host_topology:
         host_topology = objects.NUMATopology.obj_from_db_obj(
                 host_topology)
         instance_topology = (
                 hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits_topology=limit))
         if requested_topology and not instance_topology:
             return (_("Requested instance NUMA topology cannot fit "
                       "the given host NUMA topology"))
         elif instance_topology:
             self.claimed_numa_topology = instance_topology
Example #23
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_instance() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #24
    def _update_instance_topology(self, instance, chosen_host):
        if not instance.get('numa_topology'):
            return

        numa_limits = chosen_host.obj.limits.get('numa_topology', {})
        if isinstance(numa_limits, six.string_types):
            limits = hardware.VirtNUMALimitTopology.from_json(numa_limits)
        else:
            limits = hardware.VirtNUMALimitTopology(numa_limits.get('cells'))

        host_topology, __ = hardware.host_topology_and_format_from_host(
            chosen_host.obj)
        instance_topology = hardware.instance_topology_from_instance(instance)
        instance_topology = (hardware.numa_fit_instance_to_host(
            host_topology, instance_topology, limits_topology=limits))
        if instance_topology:
            instance['numa_topology'] = instance_topology
Example #25
    def host_passes(self, host_state, spec_obj):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limits,
                        pci_requests=pci_requests,
                        pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug("%(host)s, %(node)s fails NUMA topology "
                          "requirements. The instance does not fit on this "
                          "host.", {'host': host_state.host,
                                    'node': host_state.nodename},
                          instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #26
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
                                self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_instance() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #27
    def _update_instance_topology(self, instance, chosen_host):
        if not instance.get('numa_topology'):
            return

        numa_limits = chosen_host.obj.limits.get('numa_topology', {})
        if isinstance(numa_limits, six.string_types):
            limits = hardware.VirtNUMALimitTopology.from_json(numa_limits)
        else:
            limits = hardware.VirtNUMALimitTopology(
                numa_limits.get('cells'))

        host_topology, __ = hardware.host_topology_and_format_from_host(
            chosen_host.obj)
        instance_topology = hardware.instance_topology_from_instance(
            instance)
        instance_topology = (
            hardware.numa_fit_instance_to_host(
                host_topology, instance_topology,
                limits_topology=limits))
        if instance_topology:
            instance['numa_topology'] = instance_topology
Example #28
    def _locked_consume_from_request(self, spec_obj):
        disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the NUMA usage...
        if self.numa_topology and spec_obj.numa_topology:
            spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
                self.numa_topology,
                spec_obj.numa_topology,
                limits=self.limits.get('numa_topology'),
                pci_requests=pci_requests,
                pci_stats=self.pci_stats)

            self.numa_topology = hardware.numa_usage_from_instance_numa(
                self.numa_topology, spec_obj.numa_topology)

        # ...and the PCI usage
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
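
Example #28 shows the later consume path: after fitting, usage is folded back
into the host view with hardware.numa_usage_from_instance_numa, replacing the
older pattern (examples #16 and #18) of building a throwaway orphaned
objects.Instance just to call get_host_numa_usage_from_instance. Condensed,
and assuming both topologies are already objects:

    # Fit first, then account the usage against the host topology.
    fitted = hardware.numa_fit_instance_to_host(
        host_topology, requested_topology, limits=limits)
    if fitted:
        host_topology = hardware.numa_usage_from_instance_numa(
            host_topology, fitted)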
Example #29
 def host_passes(self, host_state, spec_obj):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     requested_topology = spec_obj.numa_topology
     host_topology, _fmt = hardware.host_topology_and_format_from_host(host_state)
     pci_requests = spec_obj.pci_requests
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio, ram_allocation_ratio=ram_ratio)
         instance_topology = hardware.numa_fit_instance_to_host(
             host_topology,
             requested_topology,
             limits=limits,
             pci_requests=pci_requests,
             pci_stats=host_state.pci_stats,
         )
         if not instance_topology:
             LOG.debug(
                 "%(host)s, %(node)s fails NUMA topology "
                 "requirements. The instance does not fit on this "
                 "host.",
                 {"host": host_state.host, "node": host_state.nodename},
                 instance_uuid=spec_obj.instance_uuid,
             )
             return False
         host_state.limits["numa_topology"] = limits
         return True
     elif requested_topology:
         LOG.debug(
             "%(host)s, %(node)s fails NUMA topology requirements. "
             "No host NUMA topology while the instance specified "
             "one.",
             {"host": host_state.host, "node": host_state.nodename},
             instance_uuid=spec_obj.instance_uuid,
         )
         return False
     else:
         return True
Example #30
 def host_passes(self, host_state, filter_properties):
     ram_ratio = host_state.ram_allocation_ratio
     cpu_ratio = host_state.cpu_allocation_ratio
     request_spec = filter_properties.get('request_spec', {})
     instance = request_spec.get('instance_properties', {})
     requested_topology = hardware.instance_topology_from_instance(instance)
     host_topology, _fmt = hardware.host_topology_and_format_from_host(
             host_state)
     pci_requests = instance.get('pci_requests')
     if pci_requests:
         pci_requests = pci_requests.requests
     if requested_topology and host_topology:
         limits = objects.NUMATopologyLimits(
             cpu_allocation_ratio=cpu_ratio,
             ram_allocation_ratio=ram_ratio)
         instance_topology = (hardware.numa_fit_instance_to_host(
                     host_topology, requested_topology,
                     limits=limits,
                     pci_requests=pci_requests,
                     pci_stats=host_state.pci_stats))
         if not instance_topology:
             LOG.debug("%(host)s, %(node)s fails NUMA topology "
                       "requirements. The instance does not fit on this "
                       "host.", {'host': host_state.host,
                                 'node': host_state.nodename},
                       instance_uuid=instance.get('instance_uuid'))
             return False
         host_state.limits['numa_topology'] = limits
         return True
     elif requested_topology:
         LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                   "No host NUMA topology while the instance specified "
                   "one.",
                   {'host': host_state.host, 'node': host_state.nodename},
                   instance_uuid=instance.get('instance_uuid'))
         return False
     else:
         return True
Example #31
    def host_passes(self, host_state, spec_obj):
        # WRS - disable this filter for non-libvirt hypervisor
        if not utils.is_libvirt_compute(host_state):
            return True

        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        # Note that we still need to pass the original spec_obj to
        # filter_reject so the error message persists.
        cloned_spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = cloned_spec_obj.flavor.extra_specs
        image_props = cloned_spec_obj.image.properties
        requested_topology = cloned_spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = cloned_spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        details = utils.details_initialize(details=None)

        if not self._satisfies_cpu_policy(
                host_state, extra_specs, image_props, details=details):
            msg = 'Host not useable. ' + ', '.join(details.get('reason', []))
            self.filter_reject(host_state, spec_obj, msg)
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)

            # WRS: Support strict vs prefer allocation of PCI devices.
            pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                                'strict')
            pci_strict = False if pci_numa_affinity == 'prefer' else True

            # L3 CAT Support
            if any(cell.cachetune_requested
                   for cell in requested_topology.cells):
                free_closids = (host_state.l3_closids -
                                host_state.l3_closids_used)
                if free_closids < 1:
                    msg = ('Insufficient L3 closids: '
                           'req:%(req)s, avail:%(avail)s' % {
                               'req': 1,
                               'avail': free_closids
                           })
                    self.filter_reject(host_state, spec_obj, msg)
                    return False
                # save limit for compute node to test against
                host_state.limits['closids'] = host_state.l3_closids

            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                details=details,
                pci_strict=pci_strict))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                msg = details.get('reason', [])
                self.filter_reject(host_state, spec_obj, msg)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            msg = 'Missing host topology'
            self.filter_reject(host_state, spec_obj, msg)
            return False
        else:
            return True
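
The 'hw:wrs:pci_numa_affinity' handling above recurs in several of these
examples. A minimal standalone sketch of that derivation, assuming only a
plain extra_specs dict (the key and its 'strict'/'prefer' values are taken
from the snippet itself):

    def pci_numa_affinity_is_strict(extra_specs):
        """Return True unless the flavor asks for best-effort PCI affinity."""
        # Any value other than 'prefer', including a missing key, is strict.
        return extra_specs.get('hw:wrs:pci_numa_affinity', 'strict') != 'prefer'

    assert pci_numa_affinity_is_strict({}) is True
    assert pci_numa_affinity_is_strict(
        {'hw:wrs:pci_numa_affinity': 'prefer'}) is False
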
Exemplo n.º 32
0
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host
                    and migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host
                    and migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # Same-node resize. Record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(
                    context, instance, 'new_',
                    migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(
                    context, instance, 'old_',
                    migration['old_instance_type_id'])

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration['new_instance_type_id'])

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration['old_instance_type_id'])

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                instance['system_metadata'])

        if itype:
            host_topology = resources.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                    host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (hardware.numa_fit_instance_to_host(
                host_topology, numa_topology))
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                    self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
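
The incoming/outbound/same_node classification at the top of this method is
self-contained enough to lift out. A hedged sketch, with a plain dict standing
in for the migration record (field names mirror the snippet):

    def classify_migration(migration, host, nodename):
        incoming = (migration['dest_compute'] == host and
                    migration['dest_node'] == nodename)
        outbound = (migration['source_compute'] == host and
                    migration['source_node'] == nodename)
        # A same-node resize is both incoming and outbound.
        return incoming, outbound, (incoming and outbound)

    m = {'dest_compute': 'c1', 'dest_node': 'n1',
         'source_compute': 'c1', 'source_node': 'n1'}
    assert classify_migration(m, 'c1', 'n1') == (True, True, True)
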
Exemplo n.º 33
0
    def _test_numa_topology(self, resources, limit):
        host_topology = (resources.numa_topology
                         if 'numa_topology' in resources else None)
        requested_topology = self.numa_topology
        if host_topology:
            # WRS - NUMA affinity requires extra_specs
            # NOTE(jgauld): Requires the old fix 32558ef to define
            # self.flavor, based on the 132eae7 Bug 181 fix that makes the
            # claim's _test_numa_topology() look into the destination
            # extra_specs.
            extra_specs = self.flavor.get('extra_specs', {})

            host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            pci_requests = self._pci_requests
            pci_stats = None
            if pci_requests.requests:
                pci_stats = self.tracker.pci_tracker.stats

            # WRS: Support strict vs prefer allocation of PCI devices.
            # If strict fails, fallback to prefer.
            pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                                'strict')
            pci_strict = (pci_numa_affinity != 'prefer')

            details = utils.details_initialize(details=None)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limit,
                pci_requests=pci_requests.requests,
                pci_stats=pci_stats,
                details=details,
                pci_strict=pci_strict))

            if requested_topology and not instance_topology:
                msg = details.get('reason', [])
                LOG.info(
                    '%(class)s: (%(node)s) REJECT: %(desc)s', {
                        'class': self.__class__.__name__,
                        'node': self.nodename,
                        'desc': ', '.join(msg)
                    })
                if pci_requests.requests:
                    return (_("Requested instance NUMA topology together with"
                              " requested PCI devices cannot fit the given"
                              " host NUMA topology"))
                else:
                    return (_("Requested instance NUMA topology cannot fit "
                              "the given host NUMA topology"))
            elif instance_topology:
                # Adjust the claimed pCPUs to handle scaled-down instances.
                # Catch exceptions here so that the claim returns a failure
                # message when there is an underlying pinning issue.
                try:
                    orig_instance_topology = \
                        hardware.instance_topology_from_instance(self.instance)
                    if orig_instance_topology is not None:
                        offline_cpus = orig_instance_topology.offline_cpus
                        instance_topology.set_cpus_offline(offline_cpus)
                except Exception as e:
                    LOG.error(
                        'Cannot query offline_cpus from requested '
                        'instance NUMA topology, err=%(err)s', {'err': e},
                        instance=self.instance)
                    return (_('Cannot query offline cpus from requested '
                              'instance NUMA topology'))
                self.claimed_numa_topology = instance_topology
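
The comment above mentions falling back from strict to best-effort PCI
affinity, but the retry itself is not shown in the snippet. One plausible
shape for it, with fit_fn standing in for hardware.numa_fit_instance_to_host
(names here are illustrative, not from the source):

    def fit_with_fallback(fit_fn, host_topology, requested_topology, **kwargs):
        # Try strict NUMA affinity for PCI devices first.
        fitted = fit_fn(host_topology, requested_topology,
                        pci_strict=True, **kwargs)
        if fitted is None:
            # Strict placement failed; retry allowing PCI devices on
            # other NUMA nodes (best-effort / 'prefer' behaviour).
            fitted = fit_fn(host_topology, requested_topology,
                            pci_strict=False, **kwargs)
        return fitted
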
Exemplo n.º 34
0
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # Same-node resize. Record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration['old_instance_type_id'])

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration['new_instance_type_id'])

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration['old_instance_type_id'])

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])

        if itype:
            host_topology = resources.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                        host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, numa_topology))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                        self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
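
The pci_stats bookkeeping at the end of this method serializes the tracker's
stats into the resources dict, defaulting to an empty list. A minimal
illustration, assuming only oslo.serialization:

    from oslo_serialization import jsonutils

    def record_pci_stats(resources, pci_tracker=None):
        # Serialize whatever the PCI tracker reports, or an empty list.
        stats = pci_tracker.stats if pci_tracker else []
        resources['pci_stats'] = jsonutils.dumps(stats)

    resources = {}
    record_pci_stats(resources)
    assert resources['pci_stats'] == '[]'
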
Exemplo n.º 35
0
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        if not self._is_trackable_migration(migration):
            return

        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s") % uuid)

        incoming = (migration.dest_compute == self.host and
                    migration.dest_node == self.nodename)
        outbound = (migration.source_compute == self.host and
                    migration.source_node == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # Same-node resize. Record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration.old_instance_type_id):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration.old_instance_type_id)

        if image_meta is None:
            image_meta = objects.ImageMeta.from_instance(instance)
        # TODO(jaypipes): Remove when image_meta is always passed
        # as an objects.ImageMeta
        elif not isinstance(image_meta, objects.ImageMeta):
            image_meta = objects.ImageMeta.from_dict(image_meta)

        if itype:
            host_topology = self.compute_node.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                        host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, numa_topology))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(usage)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                self.compute_node.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                self.compute_node.pci_device_pools = obj
            self.tracked_migrations[uuid] = (migration, itype)
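
This variant normalizes image_meta before use: None is rebuilt from the
instance, and a plain dict is coerced into an objects.ImageMeta. A short
sketch of just that step, assuming nova's objects module:

    from nova import objects

    def normalize_image_meta(image_meta, instance):
        if image_meta is None:
            return objects.ImageMeta.from_instance(instance)
        if not isinstance(image_meta, objects.ImageMeta):
            # Per the TODO in the snippet: drop this branch once callers
            # always pass an objects.ImageMeta.
            return objects.ImageMeta.from_dict(image_meta)
        return image_meta
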
Exemplo n.º 36
0
    def _locked_consume_from_request(self, spec_obj):
        disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus

        # WRS - extra_specs are needed in multiple places below
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # WRS: Support strict vs prefer allocation of PCI devices.
        pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                            'strict')
        pci_strict = (pci_numa_affinity != 'prefer')

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats,
            pci_strict=pci_strict)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests,
                                          instance_cells,
                                          pci_strict=pci_strict)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still in limbo between a
        # NUMATopology object (when updated by consume_from_request) and a
        # ComputeNode object (when updated by update_from_compute_node), we
        # need to keep the call to get_host_numa_usage_from_instance until
        # it's fixed (and use a temporary orphaned Instance object as a
        # proxy).
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance, strict=True)

        if self.is_ironic:
            # Consume node's entire resources regardless of instance request
            self.free_ram_mb = 0
            self.free_disk_mb = 0
            self.vcpus_used = self.vcpus_total
        else:
            # Get set of reserved thread sibling pcpus that cannot be allocated
            # when using 'isolate' cpu_thread_policy.
            reserved = hardware.get_reserved_thread_sibling_pcpus(
                instance_numa_topology, host_numa_topology)
            threads_per_core = hardware._get_threads_per_core(
                host_numa_topology)

            # WRS - normalized vCPU accounting
            vcpus = hardware.normalized_vcpus(
                vcpus=vcpus,
                reserved=reserved,
                extra_specs=extra_specs,
                image_props=image_props,
                ratio=self.cpu_allocation_ratio,
                threads_per_core=threads_per_core)

            self.free_ram_mb -= ram_mb
            self.free_disk_mb -= disk_mb
            self.vcpus_used += vcpus

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1

        # L3 CAT Support
        if ((instance.numa_topology is not None)
                and any(cell.cachetune_requested
                        for cell in instance.numa_topology.cells)):
            self.l3_closids_used += 1
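
The L3 CAT accounting that closes this example pairs with the closid check in
the filter earlier: each cachetune-requesting instance consumes one closid,
and hosts with none free are rejected. A toy sketch of that bookkeeping (the
class is illustrative only; attribute names mirror the snippets):

    class HostL3State(object):
        def __init__(self, l3_closids):
            self.l3_closids = l3_closids
            self.l3_closids_used = 0

        def claim_closid(self):
            free_closids = self.l3_closids - self.l3_closids_used
            if free_closids < 1:
                return False          # reject: no free closid
            self.l3_closids_used += 1
            return True

    host = HostL3State(l3_closids=2)
    assert host.claim_closid() and host.claim_closid()
    assert not host.claim_closid()    # third claim is rejected
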