Example No. 1
from nova import objects
from nova.virt import driver

FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_DISK_OVERHEAD = 0
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (FAKE_VIRT_MEMORY_MB +
                                  FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(cells=[
    objects.NUMACell(id=0,
                     cpuset=set([1, 2]),
                     memory=3072,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     siblings=[],
                     pinned_cpus=set([])),
    objects.NUMACell(id=1,
                     cpuset=set([3, 4]),
                     memory=3072,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     siblings=[],
                     pinned_cpus=set([]))
])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
    cpu_allocation_ratio=2, ram_allocation_ratio=2)

ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
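
# Hedged usage sketch, not part of the original fixture: fake topologies like
# FAKE_VIRT_NUMA_TOPOLOGY are typically stored on a fake compute node in DB
# string form; _to_json() is the same helper the later examples on this page
# use. The *_DB_FORMAT name below is illustrative, not from the source.
FAKE_VIRT_NUMA_TOPOLOGY_DB_FORMAT = FAKE_VIRT_NUMA_TOPOLOGY._to_json()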
Example No. 2
from nova import objects
from nova.scheduler import host_manager
from nova.tests import uuidsentinel

NUMA_TOPOLOGY = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0,
        cpuset=set([1, 2]),
        memory=512,
        cpu_usage=0,
        memory_usage=0,
        mempages=[
            objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
            objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
        ],
        siblings=[],
        pinned_cpus=set([])),
    objects.NUMACell(
        id=1,
        cpuset=set([3, 4]),
        memory=512,
        cpu_usage=0,
        memory_usage=0,
        mempages=[
            objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
            objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
        ],
        siblings=[],
        pinned_cpus=set([]))
])

NUMA_TOPOLOGIES_W_HT = [
    objects.NUMATopology(cells=[
Example No. 3
    def test_pci_stats_not_equivalent(self):
        pci_stats2 = stats.PciDeviceStats(objects.NUMATopology())
        for dev in [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3]:
            pci_stats2.add_device(dev)
        self.assertNotEqual(self.pci_stats, pci_stats2)
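
    # Hedged companion sketch, not from the original suite: the matching
    # equality case. Which fake devices self.pci_stats was built from is not
    # shown in this excerpt, so the device list below is an assumption.
    def test_pci_stats_equivalent(self):
        pci_stats2 = stats.PciDeviceStats(objects.NUMATopology())
        for dev in [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3]:
            pci_stats2.add_device(dev)
        self.assertEqual(self.pci_stats, pci_stats2)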
Example No. 4
from oslo_serialization import jsonutils
from oslo_utils import timeutils

from nova import objects
from nova.objects import hv_spec
from nova.tests.unit import fake_pci_device_pools

NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = objects.NUMATopology(cells=[
    objects.NUMACell(id=0,
                     cpuset=set([1, 2]),
                     memory=512,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     pinned_cpus=set([]),
                     siblings=[]),
    objects.NUMACell(id=1,
                     cpuset=set([3, 4]),
                     memory=512,
                     cpu_usage=0,
                     memory_usage=0,
                     mempages=[],
                     pinned_cpus=set([]),
                     siblings=[])
])
fake_numa_topology_db_format = fake_numa_topology._to_json()
fake_hv_spec = hv_spec.HVSpec(arch='x86_64', hv_type='kvm', vm_mode='hvm')
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)
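
# Hedged round-trip sketch, not part of the original fixture: assuming this
# Nova tree exposes NUMATopology.obj_from_db_obj(), the DB-format string above
# should deserialize back to an equivalent two-cell topology.
_restored = objects.NUMATopology.obj_from_db_obj(fake_numa_topology_db_format)
assert len(_restored.cells) == 2
assert _restored.cells[0].cpuset == set([1, 2])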
Example No. 5
_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}

_NUMA_HOST_TOPOLOGIES = {
    '2mb':
    objects.NUMATopology(cells=[
        objects.NUMACell(id=0,
                         cpuset=set([1, 2]),
                         memory=_2MB,
                         cpu_usage=0,
                         memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[],
                         pinned_cpus=set([])),
        objects.NUMACell(id=1,
                         cpuset=set([3, 4]),
                         memory=_2MB,
                         cpu_usage=0,
                         memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[],
                         pinned_cpus=set([]))
    ]),
}

_INSTANCE_FIXTURES = [
    objects.Instance(
        id=1,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
Example No. 6
import testtools

from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects

fake_obj_numa = objects.NUMATopology(cells=[
    objects.NUMACell(id=0,
                     cpuset=set([1, 2]),
                     pcpuset=set(),
                     memory=512,
                     cpu_usage=2,
                     memory_usage=256,
                     mempages=[],
                     pinned_cpus=set(),
                     siblings=[set([1]), set([2])]),
    objects.NUMACell(id=1,
                     cpuset=set([3, 4]),
                     pcpuset=set(),
                     memory=512,
                     cpu_usage=1,
                     memory_usage=128,
                     mempages=[],
                     pinned_cpus=set(),
                     siblings=[set([3]), set([4])])
])
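
# Hedged illustration, not in the original file: the fixture models two
# partially used cells, so free memory for cell 0 follows directly from the
# fields set above (512 - 256 = 256 MiB).
_cell0 = fake_obj_numa.cells[0]
assert _cell0.memory - _cell0.memory_usage == 256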


class _TestNUMACell(object):
    def test_free_cpus(self):
        cell_a = objects.NUMACell(id=0,
                                  cpuset=set(),
Example No. 7
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects

fake_obj_numa = objects.NUMATopology(
    cells=[
        objects.NUMACell(
            id=0, cpuset=set([1, 2]), memory=512,
            cpu_usage=2, memory_usage=256,
            mempages=[], pinned_cpus=set([]),
            siblings=[],
            l3_cdp=False,
            l3_size=0,
            l3_granularity=0,
            l3_both_used=0,
            l3_code_used=0,
            l3_data_used=0),
        objects.NUMACell(
            id=1, cpuset=set([3, 4]), memory=512,
            cpu_usage=1, memory_usage=128,
            mempages=[], pinned_cpus=set([]),
            siblings=[],
            l3_cdp=False,
            l3_size=0,
            l3_granularity=0,
            l3_both_used=0,
            l3_code_used=0,
            l3_data_used=0)])


class _TestNUMA(object):
Example No. 8
    def setUp(self):
        super(PciDeviceVFPFStatsTestCase, self).setUp()
        white_list = ['{"vendor_id":"8086","product_id":"1528"}',
                      '{"vendor_id":"8086","product_id":"1515"}']
        self.flags(passthrough_whitelist=white_list, group='pci')
        self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
Example No. 9
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects

fake_obj_numa = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0, cpuset=set([1, 2]), memory=512, cpu_usage=2, memory_usage=256),
    objects.NUMACell(
        id=1, cpuset=set([3, 4]), memory=512, cpu_usage=1, memory_usage=128)
])


class _TestNUMA(object):
    def test_convert_wipe(self):
        d1 = fake_obj_numa._to_dict()
        d2 = objects.NUMATopology.obj_from_primitive(d1)._to_dict()

        self.assertEqual(d1, d2)

    def test_pinning_logic(self):
        obj = objects.NUMATopology(cells=[
            objects.NUMACell(id=0,
                             cpuset=set([1, 2]),
Example No. 10
from oslo_serialization import jsonutils
from oslo_utils import timeutils

from nova import objects
from nova.objects import compute_node
from nova.objects import hv_spec
from nova.objects import service
from nova.tests.unit import fake_pci_device_pools
from nova.tests.unit.objects import test_objects

NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0, cpuset=set([1, 2]), memory=512, cpu_usage=0, memory_usage=0),
    objects.NUMACell(
        id=1, cpuset=set([3, 4]), memory=512, cpu_usage=0, memory_usage=0)
])
fake_numa_topology_db_format = fake_numa_topology._to_json()
fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)
fake_compute_node = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
Example No. 11
"""
Fakes For Scheduler tests.
"""

import six

from nova import objects
from nova.scheduler import driver
from nova.scheduler import host_manager

NUMA_TOPOLOGY = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0, cpuset=set([1, 2]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[],
        siblings=[], pinned_cpus=set([])),
    objects.NUMACell(
        id=1, cpuset=set([3, 4]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[],
        siblings=[], pinned_cpus=set([]))
])

NUMA_TOPOLOGY_W_HT = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0, cpuset=set([1, 2, 5, 6]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[],
        siblings=[set([1, 5]), set([2, 6])], pinned_cpus=set([])),
    objects.NUMACell(
        id=1, cpuset=set([3, 4, 7, 8]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[],
        siblings=[set([3, 4]), set([7, 8])], pinned_cpus=set([]))
])
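
# Quick hedged illustration, not in the original file: with hyperthreading the
# siblings lists group thread mates, e.g. CPUs 1 and 5 share a physical core
# in cell 0 of NUMA_TOPOLOGY_W_HT above.
assert set([1, 5]) in NUMA_TOPOLOGY_W_HT.cells[0].siblings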
Example No. 12
def _create_pci_stats(counts):
    if counts is None:  # the pci_stats column is nullable
        return None

    pools = [_create_pci_pool(count) for count in counts]
    return stats.PciDeviceStats(objects.NUMATopology(), pools)
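
# Hypothetical companion sketch: _create_pci_pool() is referenced above but
# not shown in this excerpt. One plausible shape, assuming the fake pool dict
# from nova.tests.unit.fake_pci_device_pools and PciDevicePool.from_dict()
# are available in this tree, could be:
import copy

from nova import objects
from nova.tests.unit import fake_pci_device_pools


def _create_pci_pool(count):
    pool_dict = copy.copy(fake_pci_device_pools.fake_pool_dict)
    pool_dict['count'] = count
    return objects.PciDevicePool.from_dict(pool_dict)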
Example No. 13
"""
Fakes For Scheduler tests.
"""

from mox3 import mox
from oslo.serialization import jsonutils

from nova.compute import vm_states
from nova import db
from nova import objects
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager

NUMA_TOPOLOGY = objects.NUMATopology(cells=[
    objects.NUMACell(
        id=0, cpuset=set([1, 2]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[]),
    objects.NUMACell(
        id=1, cpuset=set([3, 4]), memory=512,
        cpu_usage=0, memory_usage=0, mempages=[])
])

COMPUTE_NODES = [
        dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
             disk_available_least=None, free_ram_mb=512, vcpus_used=1,
             free_disk_gb=512, local_gb_used=0, updated_at=None,
             service=dict(host='host1', disabled=False),
             host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
             hypervisor_version=0, numa_topology=None),
        dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
             disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
             free_disk_gb=1024, local_gb_used=0, updated_at=None,
             service=dict(host='host2', disabled=True),
Example No. 14
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)

        # WRS: Determine resources consumed for the placement candidate check.
        vcpus = spec_obj.flavor.vcpus
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # WRS: The request_spec can carry a stale numa_topology, so it must be
        # updated. The numa_topology can be stale if we do an evacuation or
        # live migration after a resize.
        instance_type = spec_obj.flavor
        image_meta = objects.ImageMeta(properties=image_props)
        try:
            spec_obj.numa_topology = \
                hardware.numa_get_constraints(instance_type, image_meta)
        except Exception as ex:
            LOG.error("Cannot get numa constraints, error=%(err)r",
                      {'err': ex})

        instance_numa_topology = spec_obj.numa_topology
        # WRS: If cpu_thread_policy is ISOLATE and compute has hyperthreading
        # enabled, vcpus claim will be double flavor.vcpus.  Since we don't
        # know the compute node at this point, we'll just request flavor.vcpus
        # and let the numa_topology filter sort this out.
        numa_cell = objects.NUMACell(siblings=[])
        numa_topology = objects.NUMATopology(cells=[numa_cell])._to_json()
        computenode = objects.ComputeNode(numa_topology=numa_topology)
        normalized_resources = \
                  utils.normalized_resources_for_placement_claim(
                             resources, computenode, vcpus, extra_specs,
                             image_props, instance_numa_topology)

        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(
                normalized_resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")

                # Determine the rejection reasons for all hosts based on
                # placement vcpu, memory, and disk criteria. This is done
                # after-the-fact since the placement query does not return
                # any reasons.
                reasons = self.placement_client.get_rejection_reasons(
                    requested=normalized_resources)
                if reasons is None:
                    reasons = {}

                # Populate per-host rejection map based on placement criteria.
                host_states = self.driver.host_manager.get_all_host_states(
                    ctxt)
                for host_state in host_states:
                    if host_state.uuid in reasons:
                        msg = reasons[host_state.uuid]
                        if msg:
                            nova_utils.filter_reject('Placement',
                                                     host_state,
                                                     spec_obj,
                                                     msg,
                                                     append=False)

                reason = 'Placement service found no hosts.'
                filter_properties = spec_obj.to_legacy_filter_properties_dict()
                utils.NoValidHost_extend(filter_properties, reason=reason)
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                alloc_reqs_by_rp_uuid,
                                                provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
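
# Hypothetical sketch: _host_state_obj_to_dict() is called above but not shown
# in this excerpt. Per the docstring, each destination is reported as a dict
# with 'host', 'nodename' and 'limits' keys, so a minimal version could look
# like this (the attribute names are assumptions based on HostState):
def _host_state_obj_to_dict(host_state):
    return {
        'host': host_state.host,
        'nodename': host_state.nodename,
        'limits': host_state.limits,
    }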
Example No. 15
    def select_destinations(self, context, spec_obj, instance_uuids,
                            alloc_reqs_by_rp_uuid, provider_summaries):
        """Returns a sorted list of HostState objects that satisfy the
        supplied request_spec.

        These hosts will have already had their resources claimed in Placement.

        :param context: The RequestContext object
        :param spec_obj: The RequestSpec object
        :param instance_uuids: List of UUIDs, one for each value of the spec
                               object's num_instances attribute
        :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider
                                      UUID, of the allocation requests that may
                                      be used to claim resources against
                                      matched hosts. If None, indicates either
                                      the placement API wasn't reachable or
                                      that there were no allocation requests
                                      returned by the placement API. If the
                                      latter, the provider_summaries will be an
                                      empty dict, not None.
        :param provider_summaries: Optional dict, keyed by resource provider
                                   UUID, of information that will be used by
                                   the filters/weighers in selecting matching
                                   hosts for a request. If None, indicates that
                                   the scheduler driver should grab all compute
                                   node information locally and that the
                                   Placement API is not used. If an empty dict,
                                   indicates the Placement API returned no
                                   potential matches for the requested
                                   resources.
        """
        self.notifier.info(
            context, 'scheduler.select_destinations.start',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))

        # NOTE(sbauza): The RequestSpec.num_instances field contains the number
        # of instances created when the RequestSpec was used to first boot some
        # instances. This is incorrect when doing a move or resize operation,
        # so prefer the length of instance_uuids unless it is None.
        num_instances = (len(instance_uuids)
                         if instance_uuids else spec_obj.num_instances)

        # WRS: check against the minimum number of instances for success if
        #      set; otherwise default to num_instances.
        if hasattr(spec_obj, 'min_num_instances'):
            task_state = spec_obj.scheduler_hints.get('task_state')
            # task_state set to None indicates this is not for migration
            if task_state is None:
                num_instances = spec_obj.min_num_instances

        selected_hosts = self._schedule(context, spec_obj, instance_uuids,
                                        alloc_reqs_by_rp_uuid,
                                        provider_summaries)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            # NOTE(Rui Chen): If multiple creates failed, set the updated time
            # of selected HostState to None so that these HostStates are
            # refreshed according to database in next schedule, and release
            # the resource consumed by instance in the process of selecting
            # host.
            for host in selected_hosts:
                host.updated = None

            # Log the details but don't put those into the reason since
            # we don't want to give away too much information about our
            # actual environment.
            LOG.debug(
                'There are %(hosts)d hosts available but '
                '%(num_instances)d instances requested to build.', {
                    'hosts': len(selected_hosts),
                    'num_instances': num_instances
                })

            # Determine normalized resource allocation request required to do
            # placement query.
            resources = scheduler_utils.resources_from_request_spec(spec_obj)
            empty_computenode = objects.ComputeNode(
                numa_topology=objects.NUMATopology(
                    cells=[objects.NUMACell(siblings=[])])._to_json())
            normalized_resources = \
                scheduler_utils.normalized_resources_for_placement_claim(
                    resources, empty_computenode,
                    spec_obj.flavor.vcpus,
                    spec_obj.flavor.extra_specs,
                    spec_obj.image.properties,
                    spec_obj.numa_topology)

            # Determine the rejection reasons for all hosts based on
            # placement vcpu, memory, and disk criteria. This is done
            # after-the-fact since the placement query does not return
            # any reasons.
            reasons = self.placement_client.get_rejection_reasons(
                requested=normalized_resources)
            if reasons is None:
                reasons = {}

            # Populate per-host rejection map based on placement criteria.
            host_states = self.host_manager.get_all_host_states(context)
            for host_state in host_states:
                if host_state.uuid in reasons:
                    msg = reasons[host_state.uuid]
                    if msg:
                        utils.filter_reject('Placement',
                                            host_state,
                                            spec_obj,
                                            msg,
                                            append=False)

            # WRS - failure message
            pp = pprint.PrettyPrinter(indent=1)
            spec_ = {
                k.lstrip('_obj_'): v
                for k, v in (spec_obj.__dict__).items()
                if k.startswith('_obj_')
            }
            LOG.warning(
                'CANNOT SCHEDULE:  %(num)s available out of '
                '%(req)s requested.  spec_obj=\n%(spec)s', {
                    'num': len(selected_hosts),
                    'req': num_instances,
                    'spec': pp.pformat(spec_),
                })
            reason = _('There are not enough hosts available.')
            filter_properties = spec_obj.to_legacy_filter_properties_dict()
            scheduler_utils.NoValidHost_extend(filter_properties,
                                               reason=reason)
        else:
            # WRS - success message
            LOG.info(
                'SCHED: PASS. Selected %(hosts)s, uuid=%(uuid)s, '
                'name=%(name)s, display_name=%(display_name)s, '
                'scheduled=%(num)s', {
                    'hosts': selected_hosts,
                    'uuid': spec_obj.instance_uuid,
                    'name': spec_obj.name,
                    'display_name': spec_obj.display_name,
                    'num': len(selected_hosts)
                })

        self.notifier.info(
            context, 'scheduler.select_destinations.end',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
        return selected_hosts