def test_siblings(self):
        inst_cell = objects.InstanceNUMACell(
                cpuset=set([0, 1, 2]))
        self.assertEqual([], inst_cell.siblings)

        topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0)
        inst_cell = objects.InstanceNUMACell(
                cpuset=set([0, 1, 2]), cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)

        # One thread actually means no threads
        topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
        inst_cell = objects.InstanceNUMACell(
                cpuset=set([0, 1, 2]), cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)

        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_cell = objects.InstanceNUMACell(
                cpuset=set([0, 1, 2, 3]), cpu_topology=topo)
        self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)

        topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
        inst_cell = objects.InstanceNUMACell(
                cpuset=set([0, 1, 2, 3]), cpu_topology=topo)
        self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
def test_object_to_dict(self):
        top_obj = objects.VirtCPUTopology()
        top_obj.sockets = 2
        top_obj.cores = 4
        top_obj.threads = 8
        spec = top_obj.to_dict()
        self.assertEqual(_top_dict, spec)
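The _top_dict fixture referenced above is defined elsewhere in the test module. Assuming VirtCPUTopology.to_dict() returns a plain sockets/cores/threads mapping, a plausible sketch of that fixture would be:

# Hypothetical sketch of the _top_dict fixture; the real definition lives
# elsewhere in the test module.
_top_dict = {
    'sockets': 2,
    'cores': 4,
    'threads': 8,
}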
Example #3
def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
    """Pack an instance onto a set of siblings

    :param available_siblings: list of sets of CPU ids - available
                               siblings per core
    :param instance_cell: An instance of objects.InstanceNUMACell describing
                          the pinning requirements of the instance
    :param host_cell_id: id of the host NUMA cell that the instance is
                         being packed onto

    :returns: An instance of objects.InstanceNUMACell containing the pinning
              information, and potentially a new topology to be exposed to the
              instance. None if there is no valid way to satisfy the sibling
              requirements for the instance.

    This method will calculate the pinning for the given instance and its
    topology, making sure that hyperthreads of the instance match up with
    those of the host when the pinning takes effect.
    """

    # We build up a data structure 'can_pack' that answers the question:
    # 'Given the number of threads I want to pack, give me a list of all
    # the available sibling sets that can accommodate it'
    can_pack = collections.defaultdict(list)
    for sib in available_siblings:
        for threads_no in range(1, len(sib) + 1):
            can_pack[threads_no].append(sib)

    def _can_pack_instance_cell(instance_cell, threads_per_core, cores_list):
        """Determines if instance cell can fit an avail set of cores."""

        if threads_per_core * len(cores_list) < len(instance_cell):
            return False
        if instance_cell.siblings:
            return instance_cell.cpu_topology.threads <= threads_per_core
        else:
            return len(instance_cell) % threads_per_core == 0

    # We iterate over the can_pack dict in descending order of cores that
    # can be packed - an attempt to get even distribution over time
    for cores_per_sib, sib_list in sorted(
            (t for t in can_pack.items()), reverse=True):
        if _can_pack_instance_cell(instance_cell,
                                   cores_per_sib, sib_list):
            # Materialize the slices as a list so they can be reused below,
            # both for building the pinning and for len() in the topology
            sliced_sibs = [list(s)[:cores_per_sib] for s in sib_list]
            if instance_cell.siblings:
                pinning = zip(itertools.chain(*instance_cell.siblings),
                              itertools.chain(*sliced_sibs))
            else:
                pinning = zip(sorted(instance_cell.cpuset),
                              itertools.chain(*sliced_sibs))

            # NOTE(sfinucan) - this may be overridden later on by the drivers
            topology = (instance_cell.cpu_topology or
                        objects.VirtCPUTopology(sockets=1,
                                                cores=len(sliced_sibs),
                                                threads=cores_per_sib))
            instance_cell.pin_vcpus(*pinning)
            instance_cell.cpu_topology = topology
            instance_cell.id = host_cell_id
            return instance_cell
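A minimal usage sketch for the helper above, assuming a nova environment where _pack_instance_onto_cores and its collections/itertools imports are in scope; the host layout and expected pinning shown here are illustrative only:

from nova import objects

# Illustrative fixtures; assumes the helper above is importable.
# Two host cores, each exposing two hyperthread siblings.
available_siblings = [set([0, 1]), set([2, 3])]

# An instance cell asking for four vCPUs laid out as 2 cores x 2 threads.
instance_cell = objects.InstanceNUMACell(
    cpuset=set([0, 1, 2, 3]),
    cpu_topology=objects.VirtCPUTopology(sockets=1, cores=2, threads=2))

packed = _pack_instance_onto_cores(available_siblings, instance_cell,
                                   host_cell_id=0)
if packed is not None:
    # cpu_pinning maps guest vCPUs to host pCPUs,
    # here {0: 0, 1: 1, 2: 2, 3: 3}
    print(packed.cpu_pinning)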
Example #4
def _get_topology_for_vcpus(vcpus, sockets, cores, threads):
    if threads * cores * sockets == vcpus:
        return objects.VirtCPUTopology(sockets=sockets,
                                       cores=cores,
                                       threads=threads)
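A brief usage sketch for the helper above with illustrative values: it returns a VirtCPUTopology only when the product of the requested counts matches the vCPU total, and None otherwise.

# Illustrative values; assumes the helper above is in scope.
topo = _get_topology_for_vcpus(vcpus=8, sockets=2, cores=2, threads=2)
# topo is VirtCPUTopology(sockets=2, cores=2, threads=2)

assert _get_topology_for_vcpus(vcpus=8, sockets=2, cores=3, threads=1) is None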
Example #5
def _get_cpu_topology_constraints(flavor, image_meta):
    """Get the topology constraints declared in flavor or image

    :param flavor: Flavor object to read extra specs from
    :param image_meta: Image object to read image metadata from

    Gets the topology constraints from the configuration defined
    in the flavor extra specs or the image metadata. In the flavor
    this will look for

     hw:cpu_sockets - preferred socket count
     hw:cpu_cores - preferred core count
     hw:cpu_threads - preferred thread count
     hw:cpu_max_sockets - maximum socket count
     hw:cpu_max_cores - maximum core count
     hw:cpu_max_threads - maximum thread count

    In the image metadata this will look at

     hw_cpu_sockets - preferred socket count
     hw_cpu_cores - preferred core count
     hw_cpu_threads - preferred thread count
     hw_cpu_max_sockets - maximum socket count
     hw_cpu_max_cores - maximum core count
     hw_cpu_max_threads - maximum thread count

    The image metadata must be strictly lower than any values
    set in the flavor. All values are, however, optional.

    This will return a pair of nova.objects.VirtCPUTopology instances,
    the first giving the preferred socket/core/thread counts,
    and the second giving the upper limits on socket/core/
    thread counts.

    exception.ImageVCPULimitsRangeExceeded will be raised
    if the maximum counts set against the image exceed
    the maximum counts set against the flavor

    exception.ImageVCPUTopologyRangeExceeded will be raised
    if the preferred counts set against the image exceed
    the maximum counts set against the image or flavor

    :returns: (preferred topology, maximum topology)
    """

    # Obtain the absolute limits from the flavor
    flvmaxsockets = int(flavor.extra_specs.get("hw:cpu_max_sockets", 65536))
    flvmaxcores = int(flavor.extra_specs.get("hw:cpu_max_cores", 65536))
    flvmaxthreads = int(flavor.extra_specs.get("hw:cpu_max_threads", 65536))

    LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d", {
        "sockets": flvmaxsockets,
        "cores": flvmaxcores,
        "threads": flvmaxthreads
    })

    # Get any customized limits from the image
    maxsockets = int(
        image_meta.get("properties", {}).get("hw_cpu_max_sockets",
                                             flvmaxsockets))
    maxcores = int(
        image_meta.get("properties", {}).get("hw_cpu_max_cores", flvmaxcores))
    maxthreads = int(
        image_meta.get("properties", {}).get("hw_cpu_max_threads",
                                             flvmaxthreads))

    LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d", {
        "sockets": maxsockets,
        "cores": maxcores,
        "threads": maxthreads
    })

    # Image limits are not permitted to exceed the flavor
    # limits, i.e. they can only lower what the flavor defines
    if ((maxsockets > flvmaxsockets) or (maxcores > flvmaxcores)
            or (maxthreads > flvmaxthreads)):
        raise exception.ImageVCPULimitsRangeExceeded(sockets=maxsockets,
                                                     cores=maxcores,
                                                     threads=maxthreads,
                                                     maxsockets=flvmaxsockets,
                                                     maxcores=flvmaxcores,
                                                     maxthreads=flvmaxthreads)

    # Get any default preferred topology from the flavor
    flvsockets = int(flavor.extra_specs.get("hw:cpu_sockets", -1))
    flvcores = int(flavor.extra_specs.get("hw:cpu_cores", -1))
    flvthreads = int(flavor.extra_specs.get("hw:cpu_threads", -1))

    LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d", {
        "sockets": flvsockets,
        "cores": flvcores,
        "threads": flvthreads
    })

    # If the image limits have reduced the flavor limits
    # we might need to discard the preferred topology
    # from the flavor
    if ((flvsockets > maxsockets) or (flvcores > maxcores)
            or (flvthreads > maxthreads)):
        flvsockets = flvcores = flvthreads = -1

    # Finally see if the image has provided a preferred
    # topology to use
    sockets = int(image_meta.get("properties", {}).get("hw_cpu_sockets", -1))
    cores = int(image_meta.get("properties", {}).get("hw_cpu_cores", -1))
    threads = int(image_meta.get("properties", {}).get("hw_cpu_threads", -1))

    LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d", {
        "sockets": sockets,
        "cores": cores,
        "threads": threads
    })

    # Image topology is not permitted to exceed image/flavor
    # limits
    if ((sockets > maxsockets) or (cores > maxcores)
            or (threads > maxthreads)):
        raise exception.ImageVCPUTopologyRangeExceeded(sockets=sockets,
                                                       cores=cores,
                                                       threads=threads,
                                                       maxsockets=maxsockets,
                                                       maxcores=maxcores,
                                                       maxthreads=maxthreads)

    # If no preferred topology was set against the image
    # then use the preferred topology from the flavor
    # We use 'and' not 'or', since if any value is set
    # against the image this invalidates the entire set
    # of values from the flavor
    if sockets == -1 and cores == -1 and threads == -1:
        sockets = flvsockets
        cores = flvcores
        threads = flvthreads

    LOG.debug(
        "Chosen %(sockets)d:%(cores)d:%(threads)d limits "
        "%(maxsockets)d:%(maxcores)d:%(maxthreads)d", {
            "sockets": sockets,
            "cores": cores,
            "threads": threads,
            "maxsockets": maxsockets,
            "maxcores": maxcores,
            "maxthreads": maxthreads
        })

    return (objects.VirtCPUTopology(sockets=sockets,
                                    cores=cores,
                                    threads=threads),
            objects.VirtCPUTopology(sockets=maxsockets,
                                    cores=maxcores,
                                    threads=maxthreads))
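A hedged usage sketch for the function above, assuming a nova environment; the flavor extra specs and image metadata are illustrative fixtures, not real data. Because the image sets a preferred core count, the flavor's preferred topology is discarded entirely:

from nova import objects

# Illustrative fixtures; assumes the function above is in scope.
flavor = objects.Flavor(extra_specs={
    "hw:cpu_max_sockets": "4",
    "hw:cpu_sockets": "2",
})
image_meta = {"properties": {"hw_cpu_cores": "2"}}

preferred, maximum = _get_cpu_topology_constraints(flavor, image_meta)
# preferred is -1:2:-1 (the image preference wins, flavor preference dropped)
# maximum is 4:65536:65536 (the image inherits the unset flavor limits)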
Example #6
def _get_possible_cpu_topologies(vcpus, maxtopology, allow_threads):
    """Get a list of possible topologies for a vCPU count
    :param vcpus: total number of CPUs for guest instance
    :param maxtopology: nova.objects.VirtCPUTopology for upper limits
    :param allow_threads: if the hypervisor supports CPU threads

    Given a total desired vCPU count and constraints on the
    maximum number of sockets, cores and threads, return a
    list of nova.objects.VirtCPUTopology instances that represent every
    possible topology that satisfies the constraints.

    exception.ImageVCPULimitsRangeImpossible is raised if
    it is impossible to achieve the total vcpu count given
    the maximum limits on sockets, cores & threads.

    :returns: list of nova.objects.VirtCPUTopology instances
    """

    # Clamp the limits to the number of vcpus to prevent
    # iterating over an impractically large list
    maxsockets = min(vcpus, maxtopology.sockets)
    maxcores = min(vcpus, maxtopology.cores)
    maxthreads = min(vcpus, maxtopology.threads)

    if not allow_threads:
        maxthreads = 1

    LOG.debug("Build topologies for %(vcpus)d vcpu(s) "
              "%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
              {"vcpus": vcpus, "maxsockets": maxsockets,
               "maxcores": maxcores, "maxthreads": maxthreads})

    # Figure out all possible topologies that match
    # the required vcpus count and satisfy the declared
    # limits. If the total vCPU count were very high
    # it might be more efficient to factorize the vcpu
    # count and then only iterate over its factors, but
    # that's overkill right now
    possible = []
    for s in range(1, maxsockets + 1):
        for c in range(1, maxcores + 1):
            for t in range(1, maxthreads + 1):
                if t * c * s == vcpus:
                    o = objects.VirtCPUTopology(sockets=s, cores=c,
                                                threads=t)

                    possible.append(o)

    # We want to
    #  - Minimize threads (ie larger sockets * cores is best)
    #  - Prefer sockets over cores
    possible = sorted(possible, reverse=True,
                      key=lambda x: (x.sockets * x.cores,
                                     x.sockets,
                                     x.threads))

    LOG.debug("Got %d possible topologies", len(possible))
    if len(possible) == 0:
        raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus,
                                                       sockets=maxsockets,
                                                       cores=maxcores,
                                                       threads=maxthreads)

    return possible
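A hedged usage sketch for the function above with illustrative limits: for 4 vCPUs every sockets * cores * threads combination whose product is 4 is generated, then ordered so that thread-free, socket-heavy layouts come first.

from nova import objects

# Illustrative limits; assumes the function above is in scope.
limits = objects.VirtCPUTopology(sockets=8, cores=8, threads=2)
for topo in _get_possible_cpu_topologies(4, limits, allow_threads=True):
    print("%d:%d:%d" % (topo.sockets, topo.cores, topo.threads))
# Expected order given the sort key above:
#   4:1:1, 2:2:1, 1:4:1, 2:1:2, 1:2:2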
Example #7
    def test_siblings(self):
        # The default thread count of VirtCPUTopology is one; one thread
        # means no threads and therefore no siblings
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                             pcpuset=set())
        self.assertEqual([], inst_cell.siblings)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                             pcpuset=set([4, 5, 6]))
        self.assertEqual([], inst_cell.siblings)

        # 'threads=0' means no siblings
        topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                             pcpuset=set(),
                                             cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)
        inst_cell = objects.InstanceNUMACell(cpuset=set(),
                                             pcpuset=set([0, 1, 2]),
                                             cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)

        # One thread actually means no threads
        topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                             pcpuset=set(),
                                             cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)
        inst_cell = objects.InstanceNUMACell(cpuset=set(),
                                             pcpuset=set([0, 1, 2]),
                                             cpu_topology=topo)
        self.assertEqual([], inst_cell.siblings)

        # 2 threads per virtual core; the NUMA cell contains only one kind
        # of CPU, either all unpinned (cpuset) or all pinned (pcpuset)
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                             pcpuset=set(),
                                             cpu_topology=topo)
        self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
        inst_cell = objects.InstanceNUMACell(cpuset=set(),
                                             pcpuset=set([0, 1, 2, 3]),
                                             cpu_topology=topo)
        self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)

        # 4 threads per virtual core; again only one kind of CPU in the cell
        topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                             pcpuset=set(),
                                             cpu_topology=topo)
        self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
        inst_cell = objects.InstanceNUMACell(cpuset=set(),
                                             pcpuset=set([0, 1, 2, 3]),
                                             cpu_topology=topo)
        self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)

        # 2 threads per virtual core; the NUMA cell contains both unpinned
        # (cpuset) and pinned (pcpuset) CPUs
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1]),
                                             pcpuset=set([2, 3]),
                                             cpu_topology=topo)
        self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
Example #8
from nova.compute import arch
from nova.compute import cpumodel
from nova import objects
from nova.tests.unit.objects import test_objects

fake_cpu_model_feature = {
    'policy': cpumodel.POLICY_REQUIRE,
    'name': 'sse2',
}

fake_cpu_model_feature_obj = objects.VirtCPUFeature(**fake_cpu_model_feature)

fake_vcpumodel_dict = {
    'arch': arch.I686,
    'vendor': 'fake-vendor',
    'match': cpumodel.MATCH_EXACT,
    'topology': objects.VirtCPUTopology(sockets=1, cores=1, threads=1),
    'features': [fake_cpu_model_feature_obj],
    'mode': cpumodel.MODE_HOST_MODEL,
    'model': 'fake-model',
}
fake_vcpumodel = objects.VirtCPUModel(**fake_vcpumodel_dict)


class _TestVirtCPUFeatureObj(object):
    def test_policy_limitation(self):
        obj = objects.VirtCPUFeature()
        self.assertRaises(ValueError, setattr, obj, 'policy', 'foo')


class TestVirtCPUFeatureObj(test_objects._LocalTest, _TestVirtCPUFeatureObj):
    pass