コード例 #1
0
ファイル: test_claims.py プロジェクト: sapcc/nova
 def test_numa_topology_fails(self):
     """An instance NUMA topology too big for the host must fail the claim.

     With allocation ratios pinned to 1 the oversized cell cannot fit, so
     the claim is expected to raise ComputeResourcesUnavailable.
     """
     oversized = objects.InstanceNUMATopology(cells=[
         objects.InstanceNUMACell(
             id=1, cpuset={1, 2, 3, 4, 5}, pcpuset=set(), memory=2048),
     ])
     limits = objects.NUMATopologyLimits(cpu_allocation_ratio=1,
                                         ram_allocation_ratio=1)
     self.assertRaises(exception.ComputeResourcesUnavailable,
                       self._claim,
                       numa_topology=oversized,
                       limits={'numa_topology': limits})
コード例 #2
0
    def host_passes(self, host_state, spec_obj):
        """Return True when the instance's NUMA request fits this host.

        A request without a NUMA topology passes trivially.  Otherwise the
        host must expose a NUMA topology and
        hardware.numa_fit_instance_to_host must find a placement under the
        host's cpu/ram allocation-ratio limits; the limits used are recorded
        in host_state.limits['numa_topology'].
        """
        requested_topology = spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)

        pci_requests = spec_obj.pci_requests
        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state,
                                          spec_obj.flavor.extra_specs,
                                          spec_obj.image.properties):
            return False

        if not requested_topology:
            # No NUMA constraints requested: every host is acceptable.
            return True

        if not host_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host,
                       'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=host_state.cpu_allocation_ratio,
            ram_allocation_ratio=host_state.ram_allocation_ratio)
        fitted_topology = hardware.numa_fit_instance_to_host(
            host_topology,
            requested_topology,
            limits=limits,
            pci_requests=pci_requests,
            pci_stats=host_state.pci_stats,
            flavor=spec_obj.flavor)
        if not fitted_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "The instance does not fit on this host.",
                      {'host': host_state.host,
                       'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        host_state.limits['numa_topology'] = limits
        return True
コード例 #3
0
    def test_obj_make_compatible(self):
        """network_metadata must survive v1.1 but be stripped at v1.0."""
        net_meta = objects.NetworkMetadata(physnets={'foo', 'bar'},
                                           tunneled=True)
        limits = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0,
                                            ram_allocation_ratio=1.0,
                                            network_metadata=net_meta)
        versions = ovo_base.obj_tree_get_versions('NUMATopologyLimits')

        # The field was added in 1.1: present at 1.1, dropped at 1.0.
        for target_version, check in (('1.1', self.assertIn),
                                      ('1.0', self.assertNotIn)):
            primitive = limits.obj_to_primitive(target_version=target_version,
                                                version_manifest=versions)
            check('network_metadata', primitive['nova_object.data'])
コード例 #4
0
ファイル: test_numa.py プロジェクト: ychen2u/stx-nova
    def test_to_legacy_limits(self):
        """to_dict_legacy must scale host resources by the allocation ratios."""
        limits = objects.NUMATopologyLimits(cpu_allocation_ratio=16,
                                            ram_allocation_ratio=2)
        host_topo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset={1, 2}, memory=1024),
        ])

        # mem limit = 1024 * 2; cpu limit = 2 cpus * 16.
        expected = {
            'cells': [{
                'id': 0,
                'cpus': '1,2',
                'mem': {'total': 1024, 'limit': 2048.0},
                'cpu_limit': 32.0,
            }],
        }
        self.assertEqual(expected, limits.to_dict_legacy(host_topo))
コード例 #5
0
ファイル: test_request_spec.py プロジェクト: wkite/nova
 def test_to_legacy_filter_properties_dict(self):
     """A fully-populated RequestSpec must round-trip into the legacy
     filter_properties dict shape expected by old-style scheduling code.
     """
     numa_limits = objects.NUMATopologyLimits()
     compute_nodes = objects.ComputeNodeList(objects=[
         objects.ComputeNode(host='fake1', hypervisor_hostname='node1'),
     ])
     destination = objects.Destination(host='fakehost')

     spec = objects.RequestSpec(
         ignore_hosts=['ignoredhost'],
         force_hosts=['fakehost'],
         force_nodes=['fakenode'],
         retry=objects.SchedulerRetries(num_attempts=1,
                                        hosts=compute_nodes),
         limits=objects.SchedulerLimits(numa_topology=numa_limits,
                                        vcpu=1.0,
                                        disk_gb=10.0,
                                        memory_mb=8192.0),
         instance_group=objects.InstanceGroup(hosts=['fake1'],
                                              policy='affinity',
                                              members=['inst1', 'inst2']),
         scheduler_hints={'foo': ['bar']},
         requested_destination=destination)

     # Legacy form: retry hosts become [host, node] pairs, group data
     # flattens to sets, and single-item hint lists collapse to scalars.
     expected = {
         'ignore_hosts': ['ignoredhost'],
         'force_hosts': ['fakehost'],
         'force_nodes': ['fakenode'],
         'retry': {'num_attempts': 1,
                   'hosts': [['fake1', 'node1']]},
         'limits': {'numa_topology': numa_limits,
                    'vcpu': 1.0,
                    'disk_gb': 10.0,
                    'memory_mb': 8192.0},
         'group_updated': True,
         'group_hosts': {'fake1'},
         'group_policies': {'affinity'},
         'group_members': {'inst1', 'inst2'},
         'scheduler_hints': {'foo': 'bar'},
         'requested_destination': destination,
     }
     self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
コード例 #6
0
                     memory=3072,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     siblings=[],
                     pinned_cpus=set([])),
    objects.NUMACell(id=1,
                     cpuset=set([3, 4]),
                     memory=3072,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     siblings=[],
                     pinned_cpus=set([]))
])
# NUMA limits fixture: both cpu and ram oversubscription capped at 2x.
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
    cpu_allocation_ratio=2, ram_allocation_ratio=2)

# Fake disk sizes (GB) advertised by the fake virt driver below.
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
CONF = cfg.CONF


class FakeVirtDriver(driver.ComputeDriver):
    def __init__(self,
                 pci_support=False,
                 stats=None,
                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
        super(FakeVirtDriver, self).__init__(None)
        self.memory_mb = FAKE_VIRT_MEMORY_MB
コード例 #7
0
    def host_passes(self, host_state, spec_obj):
        """Return True when the instance's NUMA request fits this host.

        A request without a NUMA topology passes trivially; otherwise the
        host must expose a NUMA topology and
        hardware.numa_fit_instance_to_host must find a placement under the
        host's cpu/ram allocation-ratio limits (plus any network metadata
        constraints carried by the spec).  The limits used are recorded in
        host_state.limits['numa_topology'].
        """
        # numa_fit_instance_to_host mutates spec_obj.numa_topology (it fills
        # in the 'cpu_pinning' field), so operate on a clone to keep that
        # side effect from leaking into later filter calls.  Reworking the
        # helper to not mutate its input would be a large, non-backportable
        # cleanup.
        spec_obj = spec_obj.obj_clone()

        requested_topology = spec_obj.numa_topology
        host_topology = host_state.numa_topology

        network_metadata = None
        if 'network_metadata' in spec_obj:
            network_metadata = spec_obj.network_metadata

        pci_requests = spec_obj.pci_requests
        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state,
                                          spec_obj.flavor.extra_specs,
                                          spec_obj.image.properties):
            return False

        if not requested_topology:
            # No NUMA constraints requested: every host is acceptable.
            return True

        if not host_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host,
                       'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=host_state.cpu_allocation_ratio,
            ram_allocation_ratio=host_state.ram_allocation_ratio)
        if network_metadata:
            limits.network_metadata = network_metadata

        fitted_topology = hardware.numa_fit_instance_to_host(
            host_topology,
            requested_topology,
            limits=limits,
            pci_requests=pci_requests,
            pci_stats=host_state.pci_stats)
        if not fitted_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "The instance does not fit on this host.",
                      {'host': host_state.host,
                       'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        host_state.limits['numa_topology'] = limits
        return True
コード例 #8
0
ファイル: test_tracker.py プロジェクト: Drooids/nova
# 2 MiB expressed in KiB (units.Mi / units.Ki) — presumably used as a
# memory amount for the fixtures below; confirm against callers.
_2MB = 2 * units.Mi / units.Ki

# Instance NUMA fixtures keyed by a short descriptive label.
_INSTANCE_NUMA_TOPOLOGIES = {
    '2mb':
    objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
        objects.InstanceNUMACell(
            id=1, cpuset=set([3]), memory=_2MB, pagesize=0)
    ]),
}

# Matching per-test NUMA limit objects (no oversubscription: ratios 1.0).
_NUMA_LIMIT_TOPOLOGIES = {
    '2mb':
    # NOTE(review): 'id' is not an obvious NUMATopologyLimits field —
    # confirm this fork's object definition actually declares it.
    objects.NUMATopologyLimits(id=0,
                               cpu_allocation_ratio=1.0,
                               ram_allocation_ratio=1.0),
}

# Huge-page fixture: 8 pages of 2 KiB, all free.
_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}

_NUMA_HOST_TOPOLOGIES = {
    '2mb':
    objects.NUMATopology(cells=[
        objects.NUMACell(id=0,
                         cpuset=set([1, 2]),
                         memory=_2MB,
                         cpu_usage=0,
                         memory_usage=0,
コード例 #9
0
from oslo_serialization import jsonutils

from nova import exception
from nova import objects
from nova.scheduler import client
from nova.scheduler.client import report
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import utils as scheduler_utils
from nova.scheduler import weights
from nova import test  # noqa
from nova.tests.unit.scheduler import test_scheduler
from nova.tests import uuidsentinel as uuids


# Shared scheduler fixtures: a NUMA limit object, its dict/object limit
# forms, and a placement-style allocation request (dict + JSON encoding).
fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0,
        ram_allocation_ratio=1.0)
fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2,
        "numa_topology": fake_numa_limit}
fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit)
fake_alloc = {"allocations": [
        {"resource_provider": {"uuid": uuids.compute_node},
         "resources": {"VCPU": 1,
                       "MEMORY_MB": 1024,
                       "DISK_GB": 100}
        }]}
# Placement API microversion string paired with the allocation request.
fake_alloc_version = "1.23"
json_alloc = jsonutils.dumps(fake_alloc)
fake_selection = objects.Selection(service_host="fake_host",
        nodename="fake_node", compute_node_uuid=uuids.compute_node,
        cell_uuid=uuids.cell, limits=fake_limit_obj,
        allocation_request=json_alloc,
コード例 #10
0
    def host_passes(self, host_state, spec_obj):
        """Return True when the instance's NUMA request fits this host.

        WRS-specific variant: skips non-libvirt hosts, collects rejection
        reasons into ``details`` for filter_reject, supports a
        strict-vs-prefer PCI NUMA affinity extra spec, and checks L3
        cache-allocation (closid) availability.
        """
        # WRS - disable this filter for non-libvirt hypervisor
        if not utils.is_libvirt_compute(host_state):
            return True

        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        # Note that we still need to pass the original spec_obj to
        # filter_reject so the error message persists.
        cloned_spec_obj = spec_obj.obj_clone()

        ram_ratio = host_state.ram_allocation_ratio
        cpu_ratio = host_state.cpu_allocation_ratio
        extra_specs = cloned_spec_obj.flavor.extra_specs
        image_props = cloned_spec_obj.image.properties
        requested_topology = cloned_spec_obj.numa_topology
        host_topology, _fmt = hardware.host_topology_and_format_from_host(
            host_state)
        pci_requests = cloned_spec_obj.pci_requests

        if pci_requests:
            pci_requests = pci_requests.requests

        # Accumulator for human-readable rejection reasons; helpers below
        # append to details['reason'].
        details = utils.details_initialize(details=None)

        if not self._satisfies_cpu_policy(
                host_state, extra_specs, image_props, details=details):
            msg = 'Host not useable. ' + ', '.join(details.get('reason', []))
            self.filter_reject(host_state, spec_obj, msg)
            return False

        if requested_topology and host_topology:
            limits = objects.NUMATopologyLimits(cpu_allocation_ratio=cpu_ratio,
                                                ram_allocation_ratio=ram_ratio)

            # WRS: Support strict vs prefer allocation of PCI devices.
            pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                                'strict')
            pci_strict = False if pci_numa_affinity == 'prefer' else True

            # L3 CAT Support
            if any(cell.cachetune_requested
                   for cell in requested_topology.cells):
                free_closids = (host_state.l3_closids -
                                host_state.l3_closids_used)
                if free_closids < 1:
                    msg = ('Insufficient L3 closids: '
                           'req:%(req)s, avail:%(avail)s' % {
                               'req': 1,
                               'avail': free_closids
                           })
                    self.filter_reject(host_state, spec_obj, msg)
                    return False
                # save limit for compute node to test against
                host_state.limits['closids'] = host_state.l3_closids

            instance_topology = (hardware.numa_fit_instance_to_host(
                host_topology,
                requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                details=details,
                pci_strict=pci_strict))
            if not instance_topology:
                LOG.debug(
                    "%(host)s, %(node)s fails NUMA topology "
                    "requirements. The instance does not fit on this "
                    "host.", {
                        'host': host_state.host,
                        'node': host_state.nodename
                    },
                    instance_uuid=spec_obj.instance_uuid)
                msg = details.get('reason', [])
                self.filter_reject(host_state, spec_obj, msg)
                return False
            host_state.limits['numa_topology'] = limits
            return True
        elif requested_topology:
            LOG.debug(
                "%(host)s, %(node)s fails NUMA topology requirements. "
                "No host NUMA topology while the instance specified "
                "one.", {
                    'host': host_state.host,
                    'node': host_state.nodename
                },
                instance_uuid=spec_obj.instance_uuid)
            msg = 'Missing host topology'
            self.filter_reject(host_state, spec_obj, msg)
            return False
        else:
            return True