Example #1
    def _satisfies_cpu_policy(self, host_state, extra_specs, image_props):
        """Check that the host_state provided satisfies any available
        CPU policy requirements.
        """
        host_topology, _ = hardware.host_topology_and_format_from_host(
            host_state)
        # NOTE(stephenfin): There can be conflicts between the policy
        # specified by the image and that specified by the instance, but this
        # is not the place to resolve these. We do this during scheduling.
        cpu_policy = [extra_specs.get('hw:cpu_policy'),
                      image_props.get('hw_cpu_policy')]
        cpu_thread_policy = [extra_specs.get('hw:cpu_thread_policy'),
                             image_props.get('hw_cpu_thread_policy')]

        if not host_topology:
            return True

        if fields.CPUAllocationPolicy.DEDICATED not in cpu_policy:
            return True

        if fields.CPUThreadAllocationPolicy.REQUIRE not in cpu_thread_policy:
            return True

        for cell in host_topology.cells:
            # the presence of siblings indicates hyperthreading (HT)
            if not cell.siblings:
                LOG.debug("%(host_state)s fails CPU policy requirements. "
                          "Host does not have hyperthreading or "
                          "hyperthreading is disabled, but 'require' threads "
                          "policy was requested.", {'host_state': host_state})
                return False

        return True
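Read in isolation, the check above distills to: it only bites when both the 'dedicated' CPU policy and the 'require' thread policy are requested (via flavor extra specs or image properties), and it then demands thread siblings in every host NUMA cell. A self-contained sketch of that logic, using stand-in types rather than Nova's real objects:

from dataclasses import dataclass, field

# Stand-ins for fields.CPUAllocationPolicy.DEDICATED and
# fields.CPUThreadAllocationPolicy.REQUIRE.
DEDICATED = 'dedicated'
REQUIRE = 'require'

@dataclass
class FakeNUMACell:
    id: int
    siblings: list = field(default_factory=list)  # thread-sibling sets

def satisfies_cpu_policy(cells, extra_specs, image_props):
    cpu_policy = [extra_specs.get('hw:cpu_policy'),
                  image_props.get('hw_cpu_policy')]
    thread_policy = [extra_specs.get('hw:cpu_thread_policy'),
                     image_props.get('hw_cpu_thread_policy')]
    if not cells:
        return True
    if DEDICATED not in cpu_policy:
        return True
    if REQUIRE not in thread_policy:
        return True
    # 'require' is only satisfiable when every cell exposes siblings
    return all(cell.siblings for cell in cells)

# A host without hyperthreading fails a 'require' request:
host = [FakeNUMACell(id=0, siblings=[])]
print(satisfies_cpu_policy(host, {'hw:cpu_policy': 'dedicated',
                                  'hw:cpu_thread_policy': 'require'}, {}))
# -> False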
Example #2
    def host_passes(self, host_state, filter_properties):
        ram_ratio = CONF.ram_allocation_ratio
        cpu_ratio = CONF.cpu_allocation_ratio
        instance = filter_properties.get('instance_properties', {})
        instance_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        if instance_topology:
            if host_topology:
                if not hardware.VirtNUMAHostTopology.can_fit_instances(
                        host_topology, [instance_topology]):
                    return False

                limit_cells = []
                usage_after_instance = (
                        hardware.VirtNUMAHostTopology.usage_from_instances(
                            host_topology, [instance_topology]))
                for cell in usage_after_instance.cells:
                    max_cell_memory = int(cell.memory * ram_ratio)
                    max_cell_cpu = len(cell.cpuset) * cpu_ratio
                    if (cell.memory_usage > max_cell_memory or
                            cell.cpu_usage > max_cell_cpu):
                        return False
                    limit_cells.append(
                        hardware.VirtNUMATopologyCellLimit(
                            cell.id, cell.cpuset, cell.memory,
                            max_cell_cpu, max_cell_memory))
                host_state.limits['numa_topology'] = (
                        hardware.VirtNUMALimitTopology(
                            cells=limit_cells).to_json())
                return True
            else:
                return False
        else:
            return True
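In this older variant the per-cell limits are plain arithmetic: each cell's memory and CPU capacity is scaled by the configured allocation ratios, and usage after placing the instance is compared against the scaled values. A quick standalone illustration with invented numbers:

# Invented cell capacity with the historical default ratios
# ram_allocation_ratio=1.5 and cpu_allocation_ratio=16.0.
cell_memory_mb, cell_cpus = 16384, 8
ram_ratio, cpu_ratio = 1.5, 16.0

max_cell_memory = int(cell_memory_mb * ram_ratio)  # 24576 MB claimable
max_cell_cpu = cell_cpus * cpu_ratio               # 128.0 vCPUs claimable

# The filter rejects the host if projected usage exceeds either limit.
memory_usage, cpu_usage = 20000, 64
print(memory_usage > max_cell_memory or cpu_usage > max_cell_cpu)  # False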
Example #3
    def host_passes(self, host_state, filter_properties):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        request_spec = filter_properties.get('request_spec', {})
        instance = request_spec.get('instance_properties', {})
        requested_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = instance.get('pci_requests')
        if pci_requests:
            pci_requests = pci_requests.requests
        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology, requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats))
            if not instance_topology:
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            return False
        else:
            return True
Example #4
    def host_passes(self, host_state, filter_properties):
        ram_ratio = CONF.ram_allocation_ratio
        cpu_ratio = CONF.cpu_allocation_ratio
        request_spec = filter_properties.get('request_spec', {})
        instance = request_spec.get('instance_properties', {})
        requested_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        if requested_topology and host_topology:
            limit_cells = []
            for cell in host_topology.cells:
                max_cell_memory = int(cell.memory * ram_ratio)
                max_cell_cpu = len(cell.cpuset) * cpu_ratio
                limit_cells.append(hardware.VirtNUMATopologyCellLimit(
                    cell.id, cell.cpuset, cell.memory,
                    max_cell_cpu, max_cell_memory))
            limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology, requested_topology,
                limits_topology=limits))
            if not instance_topology:
                return False
            host_state.limits['numa_topology'] = limits.to_json()
            instance['numa_topology'] = instance_topology
            return True
        elif requested_topology:
            return False
        else:
            return True
Example #5
    def host_passes(self, host_state, filter_properties):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        request_spec = filter_properties.get('request_spec', {})
        instance = request_spec.get('instance_properties', {})
        requested_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = instance.get('pci_requests')
        if pci_requests:
            pci_requests = pci_requests.requests
        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats))
            if not instance_topology:
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            return False
        else:
            return True
Example #6
    def _check_destination_has_enough_memory(self):
        avail = self._get_compute_info(self.destination)['free_ram_mb']
        mem_inst = self.instance.memory_mb
        free_huge = 0
        instance_uuid = self.instance.uuid
        dest = self.destination
        if not self.instance.system_metadata.get(
                'instance_type_extra_hw:mem_page_size'):
            host_topology, _fmt = hardware.host_topology_and_format_from_host(
                self._get_compute_info(self.destination))
            cells = host_topology.cells or []
            for cell in cells:
                for page in cell.mempages or []:
                    if page.size_kb != 4:
                        free_huge += (page.total - page.used) * \
                            page.size_kb / 1024
            avail = avail - free_huge
        if not mem_inst or avail <= mem_inst:
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of memory(host:%(avail)s <= "
                       "instance:%(mem_inst)s)")
            raise exception.MigrationPreCheckError(
                reason=reason % dict(instance_uuid=instance_uuid,
                                     dest=dest,
                                     avail=avail,
                                     mem_inst=mem_inst))
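The huge-page adjustment above is unit arithmetic: any page whose size is not 4 KiB counts as a huge page, its free capacity is converted from KiB to MiB, and the sum is subtracted from the host's advertised free RAM, since a non-huge-page instance cannot use it. A standalone sketch with invented page inventories:

# Invented mempages per NUMA cell: (size_kb, total, used).
cells = [
    [(4, 4_000_000, 1_000_000), (2048, 2048, 512)],   # cell 0: 2 MiB pages
    [(4, 4_000_000, 2_000_000), (1048576, 4, 1)],     # cell 1: 1 GiB pages
]

avail_mb = 20000  # invented free_ram_mb reported by the host
free_huge_mb = 0
for cell in cells:
    for size_kb, total, used in cell:
        if size_kb != 4:  # everything except 4 KiB pages is a huge page
            free_huge_mb += (total - used) * size_kb / 1024

avail_mb -= free_huge_mb
print(avail_mb)  # 20000 - (3072 + 3072) = 13856.0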
Example #7
    def host_passes(self, host_state, filter_properties):
        ram_ratio = CONF.ram_allocation_ratio
        cpu_ratio = CONF.cpu_allocation_ratio
        request_spec = filter_properties.get('request_spec', {})
        instance = request_spec.get('instance_properties', {})
        requested_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        if requested_topology and host_topology:
            limit_cells = []
            for cell in host_topology.cells:
                max_cell_memory = int(cell.memory * ram_ratio)
                max_cell_cpu = len(cell.cpuset) * cpu_ratio
                limit_cells.append(
                    hardware.VirtNUMATopologyCellLimit(cell.id, cell.cpuset,
                                                       cell.memory,
                                                       max_cell_cpu,
                                                       max_cell_memory))
            limits = hardware.VirtNUMALimitTopology(cells=limit_cells)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology, requested_topology, limits_topology=limits))
            if not instance_topology:
                return False
            host_state.limits['numa_topology'] = limits.to_json()
            instance['numa_topology'] = instance_topology
            return True
        elif requested_topology:
            return False
        else:
            return True
Example #8
    def host_passes(self, host_state, spec_obj):
        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
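The obj_clone() at the top is the pattern worth noting: the filter works on a private copy so that the side effect the TODO describes (numa_fit_instance_to_host populating cpu_pinning on the request's topology) cannot leak into later filter invocations for other hosts. A generic illustration of the same defensive-copy idea, using copy.deepcopy and stand-in objects:

import copy

class FakeRequest:
    def __init__(self):
        self.cpu_pinning = None  # filled in as a side effect below

def fit(request):
    # Imagine a call that both computes a result and mutates its
    # argument, as numa_fit_instance_to_host does.
    request.cpu_pinning = {0: 2, 1: 3}
    return True

original = FakeRequest()
working = copy.deepcopy(original)  # the obj_clone() analogue
fit(working)
print(original.cpu_pinning)  # None: the caller's object is untouched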
Example #9
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING
        ]:
            self.num_io_ops += 1
Example #10
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance["root_gb"] + instance["ephemeral_gb"]) * 1024
        ram_mb = instance["memory_mb"]
        vcpus = instance["vcpus"]
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get("pci_requests")
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests["requests"] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests.from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(self)
        instance_numa_topology = hardware.instance_topology_from_instance(instance)

        instance["numa_topology"] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get("numa_topology"),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats,
        )
        if pci_requests:
            instance_cells = None
            if instance["numa_topology"]:
                instance_cells = instance["numa_topology"].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(self, instance)

        vm_state = instance.get("vm_state", vm_states.BUILDING)
        task_state = instance.get("task_state")
        if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING,
            task_states.REBUILDING,
            task_states.RESIZE_PREP,
            task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP,
            task_states.UNSHELVING,
            task_states.RESCUING,
        ]:
            self.num_io_ops += 1
Example #11
    def host_passes(self, host_state, spec_obj):
        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limits,
                        pci_requests=pci_requests,
                        pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug("%(host)s, %(node)s fails NUMA topology "
                          "requirements. The instance does not fit on this "
                          "host.", {'host': host_state.host,
                                    'node': host_state.nodename},
                          instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #12
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from an RequestSpec object."""
        disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still in limbo between a NUMATopology
        # object (when updated by consume_from_request) and a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #13
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from an RequestSpec object."""
        disk_mb = (spec_obj.root_gb +
                   spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still in limbo between a NUMATopology
        # object (when updated by consume_from_request) and a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #14
    def host_passes(self, host_state, spec_obj):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                flavor=spec_obj.flavor))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #15
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example #16
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        now = timeutils.utcnow()
        # NOTE(sbauza): Objects are UTC tz-aware by default
        self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())

        # Track number of instances on host
        self.num_instances += 1

        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. Thus, it has a .pci_requests field, which gets converted
        # to a primitive early on, and is thus a dict here. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        vm_state = instance.get('vm_state', vm_states.BUILDING)
        task_state = instance.get('task_state')
        if vm_state == vm_states.BUILDING or task_state in [
                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
                task_states.IMAGE_BACKUP, task_states.UNSHELVING,
                task_states.RESCUING]:
            self.num_io_ops += 1
Example #17
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        # NOTE(danms): Instance here is still a dict, which is converted from
        # an object. The pci_requests are a dict as well. Convert this when
        # we get an object all the way to this path.
        if pci_requests and pci_requests['requests'] and self.pci_stats:
            pci_requests = objects.InstancePCIRequests \
                .from_request_spec_instance_props(pci_requests)
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_instance() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #18
    def test_mem_in_resource_tracker(self, context, instance):
        avail = self.compute_node["free_ram_mb"]
        mem_inst = instance.memory_mb
        free_huge = 0
        if not instance.system_metadata.get("instance_type_extra_hw:mem_page_size"):
            host_topology, _fmt = hardware.host_topology_and_format_from_host(self.compute_node)
            cells = host_topology.cells or []
            for cell in cells:
                for page in cell.mempages or []:
                    if page.size_kb != 4:
                        free_huge += (page.total - page.used) * page.size_kb / 1024
            avail = avail - free_huge
        if not mem_inst or avail <= mem_inst:
            raise exception.NovaException(
                "Lack of memory(host:%(avail)s <= instance:%(mem_inst)s)" % dict(avail=avail, mem_inst=mem_inst)
            )
Example #19
    def _update_instance_topology(self, instance, chosen_host):
        if not instance.get('numa_topology'):
            return

        numa_limits = chosen_host.obj.limits.get('numa_topology', {})
        if isinstance(numa_limits, six.string_types):
            limits = hardware.VirtNUMALimitTopology.from_json(numa_limits)
        else:
            limits = hardware.VirtNUMALimitTopology(numa_limits.get('cells'))

        host_topology, __ = hardware.host_topology_and_format_from_host(
            chosen_host.obj)
        instance_topology = hardware.instance_topology_from_instance(instance)
        instance_topology = (hardware.numa_fit_instance_to_host(
            host_topology, instance_topology, limits_topology=limits))
        if instance_topology:
            instance['numa_topology'] = instance_topology
Example #20
    def _satisfies_cpu_policy(self, host_state, extra_specs, image_props,
                              details):
        """Check that the host_state provided satisfies any available
        CPU policy requirements.
        """
        host_topology, _ = hardware.host_topology_and_format_from_host(
            host_state)
        # NOTE(stephenfin): There can be conflicts between the policy
        # specified by the image and that specified by the instance, but this
        # is not the place to resolve these. We do this during scheduling.
        cpu_policy = [
            extra_specs.get('hw:cpu_policy'),
            image_props.get('hw_cpu_policy')
        ]
        cpu_thread_policy = [
            extra_specs.get('hw:cpu_thread_policy'),
            image_props.get('hw_cpu_thread_policy')
        ]

        if not host_topology:
            return True

        if fields.CPUAllocationPolicy.DEDICATED not in cpu_policy:
            return True

        if fields.CPUThreadAllocationPolicy.REQUIRE not in cpu_thread_policy:
            return True

        # the presence of siblings in at least one cell indicates
        # hyperthreading (HT)
        has_hyperthreading = any(cell.siblings for cell in host_topology.cells)

        if not has_hyperthreading:
            LOG.debug(
                "%(host_state)s fails CPU policy requirements. "
                "Host does not have hyperthreading or "
                "hyperthreading is disabled, but 'require' threads "
                "policy was requested.", {'host_state': host_state})
            msg = ("Requested threads policy: '%s'; from "
                   "flavor or image is not allowed on "
                   "non-hyperthreaded host" % cpu_thread_policy)
            details = utils.details_append(details, msg)
            return False

        return True
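Here `details` acts as an accumulator of human-readable rejection reasons that a caller can surface later. The WRS utils.details_initialize/details_append helpers are not public API, so the following is only a plausible minimal model of them: a dict carrying a 'reason' list, matching how the filter below consumes it.

def details_initialize(details=None):
    # Assumed behaviour: ensure a dict with a 'reason' list exists.
    return details if details is not None else {'reason': []}

def details_append(details, msg):
    details = details_initialize(details)
    details.setdefault('reason', []).append(msg)
    return details

details = details_initialize(details=None)
details = details_append(details, "Requested threads policy 'require' is "
                                  "not allowed on a non-hyperthreaded host")
print('Host not useable. ' + ', '.join(details.get('reason', [])))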
Example #21
    def host_passes(self, host_state, spec_obj):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
                host_state)
        pci_requests = spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                        host_topology, requested_topology,
                        limits=limits,
                        pci_requests=pci_requests,
                        pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug("%(host)s, %(node)s fails NUMA topology "
                          "requirements. The instance does not fit on this "
                          "host.", {'host': host_state.host,
                                    'node': host_state.nodename},
                          instance_uuid=spec_obj.instance_uuid)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False
        else:
            return True
Example #22
    def test_mem_in_resource_tracker(self, context, instance):
        avail = self.compute_node['free_ram_mb']
        mem_inst = instance.memory_mb
        free_huge = 0
        if not instance.system_metadata.get(
                'instance_type_extra_hw:mem_page_size'):
            host_topology, _fmt = hardware.host_topology_and_format_from_host(
                self.compute_node)
            cells = host_topology.cells or []
            for cell in cells:
                for page in cell.mempages or []:
                    if page.size_kb != 4:
                        free_huge += (page.total - page.used) * \
                            page.size_kb / 1024
            avail = avail - free_huge
        if not mem_inst or avail <= mem_inst:
            raise exception.NovaException("Lack of memory(host:%(avail)s <= "
                                          "instance:%(mem_inst)s)" %
                                          dict(avail=avail, mem_inst=mem_inst))
Example #23
    def consume_from_instance(self, instance):
        """Incrementally update host state from an instance."""
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        vcpus = instance['vcpus']
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = instance.get('pci_requests')
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)

        instance['numa_topology'] = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if instance['numa_topology']:
                instance_cells = instance['numa_topology'].cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
                self, instance)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_instance() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
Example #24
    def _update_instance_topology(self, instance, chosen_host):
        if not instance.get('numa_topology'):
            return

        numa_limits = chosen_host.obj.limits.get('numa_topology', {})
        if isinstance(numa_limits, six.string_types):
            limits = hardware.VirtNUMALimitTopology.from_json(numa_limits)
        else:
            limits = hardware.VirtNUMALimitTopology(
                numa_limits.get('cells'))

        host_topology, __ = hardware.host_topology_and_format_from_host(
            chosen_host.obj)
        instance_topology = hardware.instance_topology_from_instance(
            instance)
        instance_topology = (
            hardware.numa_fit_instance_to_host(
                host_topology, instance_topology,
                limits_topology=limits))
        if instance_topology:
            instance['numa_topology'] = instance_topology
Example #25
    def host_passes(self, host_state, filter_properties):
        """Only return hosts with sufficient available RAM."""
        instance_type = filter_properties.get('instance_type')
        requested_ram = instance_type['memory_mb']
        free_ram_mb = host_state.free_ram_mb
        total_usable_ram_mb = host_state.total_usable_ram_mb
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        # Discount huge-page memory: it is reserved and not usable as 4K RAM.
        free_page_mem = 0
        total_page_mem = 0
        if host_topology is not None and host_topology.cells:
            for cell in host_topology.cells:
                for page in cell.mempages:
                    if page.size_kb != 4:
                        free_page_mem += (page.total - page.used) * \
                            page.size_kb / 1024
                        total_page_mem += page.total * page.size_kb / 1024
        total_usable_ram_mb = total_usable_ram_mb - total_page_mem
        ram_allocation_ratio = self._get_ram_allocation_ratio(host_state,
                                                          filter_properties)

        memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio
        used_ram_mb = total_usable_ram_mb - (free_ram_mb - free_page_mem)
        usable_ram = memory_mb_limit - used_ram_mb
        if filter_properties['instance_type'].\
                get('extra_specs', {}).get('hw:mem_page_size'):
            LOG.debug("Huge page vm willn't use ram filter.")
            return True
        if not usable_ram >= requested_ram:
            LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
                    "usable ram, it only has %(usable_ram)s MB usable ram.",
                    {'host_state': host_state,
                     'requested_ram': requested_ram,
                     'usable_ram': usable_ram})
            return False

        # save oversubscription limit for compute node to test against:
        host_state.limits['memory_mb'] = memory_mb_limit
        return True
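The oversubscription arithmetic above is easy to follow with concrete numbers: huge-page memory is removed from both the total and the free figures, the remaining 4K RAM is scaled by the allocation ratio to get a limit, and usable RAM is the limit minus what is already consumed. A standalone recomputation with invented values:

# Invented host figures mirroring the variables above.
total_usable_ram_mb = 64000
free_ram_mb = 20000
total_page_mem = 8192   # MB held in huge pages overall
free_page_mem = 6144    # MB of those huge pages still free
ram_allocation_ratio = 1.5
requested_ram = 4096

total_usable_ram_mb -= total_page_mem                 # 55808: 4K RAM only
memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio       # 83712.0
used_ram_mb = total_usable_ram_mb - (free_ram_mb - free_page_mem)  # 41952
usable_ram = memory_mb_limit - used_ram_mb            # 41760.0
print(usable_ram >= requested_ram)  # True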
Example #26
    def host_passes(self, host_state, spec_obj):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(host_state)
        pci_requests = spec_obj.pci_requests
        if pci_requests:
            pci_requests = pci_requests.requests
        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio, ram_allocation_ratio=ram_ratio)
            instance_topology = hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
            )
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.",
                    {"host": host_state.host, "node": host_state.nodename},
                    instance_uuid=spec_obj.instance_uuid,
                )
                return False
            host_state.limits["numa_topology"] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.",
                {"host": host_state.host, "node": host_state.nodename},
                instance_uuid=spec_obj.instance_uuid,
            )
            return False
        else:
            return True
Example #27
    def host_passes(self, host_state, filter_properties):
        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        request_spec = filter_properties.get('request_spec', {})
        instance = request_spec.get('instance_properties', {})
        requested_topology = hardware.instance_topology_from_instance(instance)
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = instance.get('pci_requests')
        if pci_requests:
            pci_requests = pci_requests.requests
        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology, requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats))
            if not instance_topology:
                LOG.debug("%(host)s, %(node)s fails NUMA topology "
                          "requirements. The instance does not fit on this "
                          "host.", {'host': host_state.host,
                                    'node': host_state.nodename},
                          instance_uuid=instance.get('instance_uuid'))
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=instance.get('instance_uuid'))
            return False
        else:
            return True
Example #28
def normalized_resources_for_placement_claim(resources, compute_node, vcpus,
                                             extra_specs, image_props,
                                             instance_numa_topology):
    normalized_resources = copy.deepcopy(resources)
    # Get host numa topology
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        compute_node)
    # Get set of reserved thread sibling pcpus that cannot be allocated
    # when using 'isolate' cpu_thread_policy.
    reserved = hardware.get_reserved_thread_sibling_pcpus(
        instance_numa_topology, host_numa_topology)
    threads_per_core = hardware._get_threads_per_core(host_numa_topology)
    # As placement service only supports integer allocation, multiply floating
    # vcpus from normalized_vcpus by cpu_allocation_ratio.  This means one
    # dedicated vcpu will be allocated 16 while 1 floating cpu would be
    # allocated 1.
    normalized_resources[fields.ResourceClass.VCPU] = int(
        CONF.cpu_allocation_ratio * hardware.normalized_vcpus(
            vcpus=vcpus,
            reserved=reserved,
            extra_specs=extra_specs,
            image_props=image_props,
            ratio=CONF.cpu_allocation_ratio,
            threads_per_core=threads_per_core))
    return normalized_resources
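The closing multiplication exists because the placement service only accepts integer allocations: a "floating" (shared) vCPU is first normalized to 1/ratio of a dedicated one, then scaled back up by the ratio. Under that reading, and taking the ratio of 16 from the comment above purely for illustration, one dedicated vCPU claims 16 units while one floating vCPU claims 1:

cpu_allocation_ratio = 16.0  # assumed value, for illustration only

def placement_vcpu_claim(vcpus, dedicated):
    # Dedicated vCPUs count at full weight; floating ones at 1/ratio.
    # Scaling by the ratio therefore yields an integer claim either way.
    normalized = vcpus if dedicated else vcpus / cpu_allocation_ratio
    return int(cpu_allocation_ratio * normalized)

print(placement_vcpu_claim(1, dedicated=True))   # 16
print(placement_vcpu_claim(1, dedicated=False))  # 1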
Example #29
    def _check_destination_has_enough_memory(self):
        avail = self._get_compute_info(self.destination)['free_ram_mb']
        mem_inst = self.instance.memory_mb
        free_huge = 0
        instance_uuid = self.instance.uuid
        dest = self.destination
        if not self.instance.system_metadata.get(
                'instance_type_extra_hw:mem_page_size'):
            host_topology, _fmt = hardware.host_topology_and_format_from_host(
                self._get_compute_info(self.destination))
            cells = host_topology.cells or []
            for cell in cells:
                for page in cell.mempages or []:
                    if page.size_kb != 4:
                        free_huge += (page.total - page.used) * \
                            page.size_kb / 1024
            avail = avail - free_huge
        if not mem_inst or avail <= mem_inst:
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of memory(host:%(avail)s <= "
                       "instance:%(mem_inst)s)")
            raise exception.MigrationPreCheckError(reason=reason % dict(
                instance_uuid=instance_uuid, dest=dest, avail=avail,
                mem_inst=mem_inst))
Example #30
    def host_passes(self, host_state, spec_obj):
        """Return True if host has sufficient CPU cores.

        :param host_state: nova.scheduler.host_manager.HostState
        :param spec_obj: filter options
        :return: boolean
        """
        if not host_state.vcpus_total:
            # Fail safe
            LOG.warning(_LW("VCPUs not set; assuming CPU collection broken"))
            return True

        instance_vcpus = spec_obj.vcpus
        cpu_allocation_ratio = self._get_cpu_allocation_ratio(
            host_state, spec_obj)
        vcpus_total = host_state.vcpus_total

        # WRS: this will be needed further down
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # Only provide a VCPU limit to compute if the virt driver is reporting
        # an accurate count of installed VCPUs. (XenServer driver does not)
        if vcpus_total > 0:
            host_state.limits['vcpu'] = vcpus_total

            # Do not allow an instance to overcommit against itself, only
            # against other instances.
            unshared_vcpus = hardware.unshared_vcpus(instance_vcpus,
                                                     extra_specs)
            if unshared_vcpus > host_state.vcpus_total:
                LOG.debug(
                    "%(host_state)s does not have %(instance_vcpus)d "
                    "unshared cpus before overcommit, it only has %(cpus)d", {
                        'host_state': host_state,
                        'instance_vcpus': unshared_vcpus,
                        'cpus': host_state.vcpus_total
                    })
                msg = ('Insufficient total vcpus: req:%(req)s, '
                       'avail:%(cpus)s' % {
                           'req': instance_vcpus,
                           'cpus': host_state.vcpus_total
                       })
                self.filter_reject(host_state, spec_obj, msg)
                return False

        free_vcpus = vcpus_total - host_state.vcpus_used
        # WRS:extension - normalized vCPU accounting.  host_state.vcpus_used
        # is now reported in floating-point.
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        threads_per_core = hardware._get_threads_per_core(host_numa_topology)
        normalized_instance_vcpus = hardware.normalized_vcpus(
            vcpus=instance_vcpus,
            reserved=set(),
            extra_specs=extra_specs,
            image_props=image_props,
            ratio=cpu_allocation_ratio,
            threads_per_core=threads_per_core)
        if free_vcpus < normalized_instance_vcpus:
            LOG.debug(
                "%(host_state)s does not have %(instance_vcpus)f "
                "usable vcpus, it only has %(free_vcpus)f usable "
                "vcpus", {
                    'host_state': host_state,
                    'instance_vcpus': normalized_instance_vcpus,
                    'free_vcpus': free_vcpus
                })
            msg = ('Insufficient vcpus: req:%(req)s, avail:%(avail)s' % {
                'req': instance_vcpus,
                'avail': free_vcpus
            })
            self.filter_reject(host_state, spec_obj, msg)
            return False

        return True
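The two rejections above are simple comparisons once the normalization is granted. The WRS helpers unshared_vcpus and normalized_vcpus are not public, so the sketch below only guesses at their intent (floating vCPUs weigh 1/ratio of a dedicated one; only dedicated vCPUs count against the no-self-overcommit rule):

# Invented numbers; vcpus_used is floating-point under the WRS
# normalized accounting mentioned in the comments above.
vcpus_total, vcpus_used = 32, 28.5
cpu_allocation_ratio = 16.0
instance_vcpus, dedicated = 4, False

# 1) No overcommit against itself: dedicated (unshared) vCPUs must
#    fit within the host's physical total.
unshared_vcpus = instance_vcpus if dedicated else 0
print(unshared_vcpus <= vcpus_total)   # True

# 2) Normalized demand: 4 floating vCPUs cost 4/16 = 0.25 here.
normalized = (instance_vcpus if dedicated
              else instance_vcpus / cpu_allocation_ratio)
free_vcpus = vcpus_total - vcpus_used  # 3.5
print(free_vcpus >= normalized)        # True: 3.5 >= 0.25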
Example #31
    def _locked_consume_from_request(self, spec_obj):
        disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus

        # WRS - extra_specs are needed in multiple places below
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # WRS: Support strict vs prefer allocation of PCI devices.
        pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                            'strict')
        pci_strict = False if pci_numa_affinity == 'prefer' else True

        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology

        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology,
            instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests,
            pci_stats=self.pci_stats,
            pci_strict=pci_strict)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests,
                                          instance_cells,
                                          pci_strict=pci_strict)

        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still in limbo between a NUMATopology
        # object (when updated by consume_from_request) and a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)

        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance, strict=True)

        if self.is_ironic:
            # Consume node's entire resources regardless of instance request
            self.free_ram_mb = 0
            self.free_disk_mb = 0
            self.vcpus_used = self.vcpus_total
        else:
            # Get set of reserved thread sibling pcpus that cannot be allocated
            # when using 'isolate' cpu_thread_policy.
            reserved = hardware.get_reserved_thread_sibling_pcpus(
                instance_numa_topology, host_numa_topology)
            threads_per_core = hardware._get_threads_per_core(
                host_numa_topology)

            # WRS - normalized vCPU accounting
            vcpus = hardware.normalized_vcpus(
                vcpus=vcpus,
                reserved=reserved,
                extra_specs=extra_specs,
                image_props=image_props,
                ratio=self.cpu_allocation_ratio,
                threads_per_core=threads_per_core)

            self.free_ram_mb -= ram_mb
            self.free_disk_mb -= disk_mb
            self.vcpus_used += vcpus

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1

        # L3 CAT Support
        if ((instance.numa_topology is not None)
                and any(cell.cachetune_requested
                        for cell in instance.numa_topology.cells)):
            self.l3_closids_used += 1
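The L3 CAT bookkeeping at the end treats cache classes of service as a counted resource: any instance whose NUMA topology requests cache tuning consumes one closid, and the companion filter (below) rejects hosts with none free. A toy model of that accounting (names invented):

class FakeHost:
    def __init__(self, l3_closids):
        self.l3_closids = l3_closids
        self.l3_closids_used = 0

    def consume(self, cachetune_requested):
        if cachetune_requested:
            self.l3_closids_used += 1

    def fits(self, cachetune_requested):
        free = self.l3_closids - self.l3_closids_used
        return not cachetune_requested or free >= 1

host = FakeHost(l3_closids=2)
host.consume(True)
host.consume(True)
print(host.fits(True))   # False: no closids left
print(host.fits(False))  # True: non-cachetune instances unaffected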
Example #32
    def host_passes(self, host_state, spec_obj):
        # WRS - disable this filter for non-libvirt hypervisor
        if not utils.is_libvirt_compute(host_state):
            return True

        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        # Note that we still need to pass the original spec_obj to
        # filter_reject so the error message persists.
        cloned_spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = cloned_spec_obj.flavor.extra_specs
        image_props = cloned_spec_obj.image.properties
        requested_topology = cloned_spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = cloned_spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        details = utils.details_initialize(details=None)

        if not self._satisfies_cpu_policy(
                host_state, extra_specs, image_props, details=details):
            msg = 'Host not useable. ' + ', '.join(details.get('reason', []))
            self.filter_reject(host_state, spec_obj, msg)
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)

            # WRS: Support strict vs prefer allocation of PCI devices.
            pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                                'strict')
            pci_strict = False if pci_numa_affinity == 'prefer' else True

            # L3 CAT Support
            if any(cell.cachetune_requested
                   for cell in requested_topology.cells):
                free_closids = (host_state.l3_closids -
                                host_state.l3_closids_used)
                if free_closids < 1:
                    msg = ('Insufficient L3 closids: '
                           'req:%(req)s, avail:%(avail)s' % {
                               'req': 1,
                               'avail': free_closids
                           })
                    self.filter_reject(host_state, spec_obj, msg)
                    return False
                # save limit for compute node to test against
                host_state.limits['closids'] = host_state.l3_closids

            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                details=details,
                pci_strict=pci_strict))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                msg = details.get('reason', [])
                self.filter_reject(host_state, spec_obj, msg)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            msg = 'Missing host topology'
            self.filter_reject(host_state, spec_obj, msg)
            return False
        else:
            return True