예제 #1
0
 def test_host_state_obj_to_dict_numa_topology_limits_conversion(self):
     """Verify _host_state_obj_to_dict primitive-izes NUMATopologyLimits.

     The NUMATopologyFilter stores a NUMATopologyLimits object under
     host_state.limits['numa_topology']; that entry must be converted
     to a primitive by _host_state_obj_to_dict while any other object
     found in the limits dict is passed through untouched, and the
     original HostState must be left unmodified.
     """
     host_state = host_manager.HostState('fake-host', 'fake-node',
                                         uuids.cell_uuid)
     numa_limits = objects.NUMATopologyLimits(
         cpu_allocation_ratio=CONF.cpu_allocation_ratio,
         ram_allocation_ratio=CONF.ram_allocation_ratio)
     # An arbitrary non-NUMA object that must NOT be converted.
     ignored_limits = objects.SchedulerLimits()
     host_state.limits.update(numa_topology=numa_limits,
                              ignored=ignored_limits)
     expected = {
         'host': 'fake-host',
         'nodename': 'fake-node',
         'limits': {
             'numa_topology': numa_limits.obj_to_primitive(),
             'ignored': ignored_limits
         }
     }
     self.assertDictEqual(
         expected, manager._host_state_obj_to_dict(host_state))
     # The conversion must not mutate the HostState's own limits dict.
     self.assertIsInstance(host_state.limits['numa_topology'],
                           objects.NUMATopologyLimits)
예제 #2
0
    def _test_populate_filter_props(self, selection_obj=True,
                                    with_retry=True,
                                    force_hosts=None,
                                    force_nodes=None,
                                    no_limits=None):
        """Common assertions for scheduler_utils.populate_filter_properties.

        :param selection_obj: if True pass the Selection object itself,
            otherwise pass its dict-serialized form
        :param with_retry: seed filter_properties as though retries apply
        :param force_hosts: forced host list (None means no forced hosts)
        :param force_nodes: forced node list (None means no forced nodes)
        :param no_limits: if truthy the Selection is built with limits=None
        """
        if force_hosts is None:
            force_hosts = []
        if force_nodes is None:
            force_nodes = []
        if with_retry:
            if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
                 or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
                # Exactly one forced destination: retries are disabled.
                filter_properties = dict(force_hosts=force_hosts,
                                         force_nodes=force_nodes)
            elif len(force_hosts) > 1 or len(force_nodes) > 1:
                # Multiple forced destinations still allow retries.
                filter_properties = dict(retry=dict(hosts=[]),
                                         force_hosts=force_hosts,
                                         force_nodes=force_nodes)
            else:
                filter_properties = dict(retry=dict(hosts=[]))
        else:
            filter_properties = dict()

        if no_limits:
            fake_limits = None
        else:
            fake_limits = objects.SchedulerLimits(vcpu=1, disk_gb=2,
                    memory_mb=3, numa_topology=None)
        selection = objects.Selection(service_host="fake-host",
                nodename="fake-node", limits=fake_limits)
        if not selection_obj:
            selection = selection.to_dict()
            # Guard: with no_limits there is no limits object, and calling
            # to_dict() on None would raise AttributeError.
            if fake_limits is not None:
                fake_limits = fake_limits.to_dict()

        scheduler_utils.populate_filter_properties(filter_properties,
                                                   selection)

        enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
        enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
        if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
            # Populate a second time so retry['hosts'] accumulates two
            # entries below.
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       selection)

        if force_hosts:
            expected_limits = None
        elif no_limits:
            expected_limits = {}
        else:
            expected_limits = fake_limits
        self.assertEqual(expected_limits,
                         filter_properties.get('limits'))

        if (with_retry and enable_retry_force_hosts
                       and enable_retry_force_nodes):
            self.assertEqual([['fake-host', 'fake-node'],
                              ['fake-host', 'fake-node']],
                             filter_properties['retry']['hosts'])
        else:
            self.assertNotIn('retry', filter_properties)
예제 #3
0
def fake_spec_obj(remove_id=False):
    """Return a fully-populated RequestSpec object for use in tests.

    :param remove_id: when True, leave the 'id' field unset.
    """
    ctxt = context.RequestContext('fake', 'fake')
    spec = objects.RequestSpec(ctxt)
    if not remove_id:
        spec.id = 42
    # Populate every field the scheduler cares about from one table.
    field_values = dict(
        instance_uuid=uuidutils.generate_uuid(),
        image=IMAGE_META,
        numa_topology=INSTANCE_NUMA_TOPOLOGY,
        pci_requests=PCI_REQUESTS,
        flavor=fake_flavor.fake_flavor_obj(ctxt),
        retry=objects.SchedulerRetries(),
        limits=objects.SchedulerLimits(),
        instance_group=objects.InstanceGroup(uuid=uuids.instgroup),
        project_id='fake',
        user_id='fake-user',
        num_instances=1,
        availability_zone=None,
        ignore_hosts=['host2', 'host4'],
        force_hosts=['host1', 'host3'],
        force_nodes=['node1', 'node2'],
        scheduler_hints={'hint': ['over-there']},
        requested_destination=None,
    )
    for field, value in field_values.items():
        setattr(spec, field, value)
    # 'id' should never appear as a changed field.
    spec.obj_reset_changes(['id'])
    return spec
예제 #4
0
 def test_to_legacy_filter_properties_dict(self):
     """Check RequestSpec conversion to a legacy filter_properties dict."""
     numa_limits = objects.NUMATopologyLimits()
     computes = objects.ComputeNodeList(
         objects=[objects.ComputeNode(host='fake1',
                                      hypervisor_hostname='node1')])
     spec = objects.RequestSpec(
         ignore_hosts=['ignoredhost'],
         force_hosts=['fakehost'],
         force_nodes=['fakenode'],
         retry=objects.SchedulerRetries(num_attempts=1, hosts=computes),
         limits=objects.SchedulerLimits(numa_topology=numa_limits,
                                        vcpu=1.0,
                                        disk_gb=10.0,
                                        memory_mb=8192.0),
         instance_group=objects.InstanceGroup(hosts=['fake1'],
                                              policies=['affinity']),
         scheduler_hints={'foo': ['bar']})
     expected = {
         'ignore_hosts': ['ignoredhost'],
         'force_hosts': ['fakehost'],
         'force_nodes': ['fakenode'],
         # Retry hosts come back as [host, node] pairs.
         'retry': {'num_attempts': 1,
                   'hosts': [['fake1', 'node1']]},
         # The NUMATopologyLimits object is carried through unconverted.
         'limits': {'numa_topology': numa_limits,
                    'vcpu': 1.0,
                    'disk_gb': 10.0,
                    'memory_mb': 8192.0},
         'group_updated': True,
         'group_hosts': {'fake1'},
         'group_policies': {'affinity'},
         # Single-value hint lists are flattened to the bare value.
         'scheduler_hints': {'foo': 'bar'},
     }
     self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
예제 #5
0
 def setUp(self):
     """Build a PrepResizeAtDestTask wired with mocks for each test."""
     super(PrepResizeAtDestTaskTestCase, self).setUp()
     selection = objects.Selection(service_host='fake-host',
                                   nodename='fake-host',
                                   limits=objects.SchedulerLimits())
     # The task under test; the RPC/network/volume APIs are all mocked
     # so individual tests can assert on the calls made.
     self.task = cross_cell_migrate.PrepResizeAtDestTask(
         nova_context.get_context(),
         objects.Instance(uuid=uuids.instance),
         objects.Flavor(),
         objects.Migration(),
         objects.RequestSpec(),
         compute_rpcapi=mock.Mock(),
         host_selection=selection,
         network_api=mock.Mock(),
         volume_api=mock.Mock())
예제 #6
0
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import query
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_instance


# Canned scheduler results shared by the test cases below: two Selection
# objects pointing at different host/node/compute-node triples in the
# same cell, each carrying an (empty) SchedulerLimits object.
fake_limits1 = objects.SchedulerLimits()
fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
        cell_uuid=uuids.cell, limits=fake_limits1,
        compute_node_uuid=uuids.compute_node1)
fake_limits2 = objects.SchedulerLimits()
fake_selection2 = objects.Selection(service_host="host2", nodename="node2",
        cell_uuid=uuids.cell, limits=fake_limits2,
        compute_node_uuid=uuids.compute_node2)


class LiveMigrationTaskTestCase(test.NoDBTestCase):
    def setUp(self):
        super(LiveMigrationTaskTestCase, self).setUp()
        self.context = nova_context.get_admin_context()
        self.instance_host = "host"
        self.instance_uuid = uuids.instance