def start_computes_and_servers(self):
    # Start 2 computes
    self.start_computes({
        'host_a': fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=4, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000),
        'host_b': fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=4, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000),
    })

    # Create a 2-CPU flavor
    extra_spec = {'hw:cpu_policy': 'dedicated'}
    flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)

    # Boot 2 servers with 2 CPUs each, one on host_a and one on host_b.
    # Given the cpu_dedicated_set we set earlier, they should both be on
    # CPUs 0,1.
    for server_name, host in [('server_a', 'host_a'),
                              ('server_b', 'host_b')]:
        server = self._create_server(
            flavor_id=flavor, host=host, networks='none')
        setattr(self, server_name,
                self._wait_for_state_change(server, 'ACTIVE'))
        self.assertEqual(host, self.get_host(server['id']))
        self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])
def setUp(self):
    super().setUp()

    self.neutron.list_extensions = self.list_extensions
    self.neutron_api = neutron.API()
    # TODO(sean-k-mooney): remove after
    # I275509eb0e0eb9eaf26fe607b7d9a67e1edc71f8
    # has merged.
    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.driver.connector',
        fake_os_brick_connector))

    self.start_compute(
        hostname='start_host',
        host_info=fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=2))
    self.start_compute(
        hostname='end_host',
        host_info=fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=2))

    self.ctxt = context.get_admin_context()
    # TODO(sean-k-mooney): remove this when it is part of ServersTestBase
    self.useFixture(fixtures.MonkeyPatch(
        'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
        self._migrate_stub))
def _test(self, pin_source, pin_cond, expect_success=True):
    self.start_compute(
        hostname='source',
        host_info=fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000))
    self.start_compute(
        hostname='dest',
        host_info=fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000))

    ctxt = context.get_admin_context()
    src_mgr = self.computes['source'].manager
    cond_mgr = self.conductor.manager.compute_task_mgr
    if pin_source:
        src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
            '5.2')
    if pin_cond:
        cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
            '5.2')

    self.assertEqual(
        not pin_source,
        src_mgr.compute_rpcapi.router.client(
            ctxt).can_send_version('5.3'))
    self.assertEqual(
        not pin_cond,
        cond_mgr.compute_rpcapi.router.client(
            ctxt).can_send_version('5.3'))

    extra_spec = {'hw:numa_nodes': 1,
                  'hw:cpu_policy': 'dedicated'}
    flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
    server1 = self._create_server(flavor_id=flavor, networks='none')
    server2 = self._create_server(flavor_id=flavor, networks='none')
    if self.get_host(server1['id']) == 'source':
        self.migrating_server = server1
    else:
        self.migrating_server = server2
    self.api.post_server_action(
        self.migrating_server['id'],
        {'os-migrateLive': {'host': 'dest',
                            'block_migration': 'auto',
                            'force': True}})
    self._wait_for_state_change(self.migrating_server, 'ACTIVE')
    if expect_success:
        final_host = 'dest'
        self._wait_for_migration_status(self.migrating_server,
                                        ['completed'])
    else:
        final_host = 'source'
        self._wait_for_migration_status(self.migrating_server,
                                        ['failed'])
    self.assertEqual(final_host,
                     self.get_host(self.migrating_server['id']))
    self.assertTrue(self.migrate_stub_ran)
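# NOTE: context for the version pinning above. Pinning either the source
# compute's or the conductor's RPC API to 5.2 simulates a rolling upgrade
# in which one side cannot yet speak 5.3, the version that (as this test
# assumes) carries the NUMA-aware live migration support. The
# can_send_version('5.3') assertions simply verify the stub took effect
# before the migration is attempted, so a failure reflects the version
# pin rather than a broken stub.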
def test_cold_migrate_with_physnet_fails(self):
    host_infos = [
        # host 1 has room on both nodes
        fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                             cpu_cores=2, cpu_threads=2,
                             kB_mem=15740000),
        # host 2 has no second node, where the desired physnet is
        # reported to be attached
        fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                             cpu_cores=1, cpu_threads=1,
                             kB_mem=15740000),
    ]

    # Start services
    self.computes = {}
    for host in ['test_compute0', 'test_compute1']:
        host_info = host_infos.pop(0)
        fake_connection = self._get_connection(
            host_info=host_info, hostname=host)

        # This is fun. Firstly we need to do a global'ish mock so we can
        # actually start the service.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            compute = self.start_service('compute', host=host)

        # Once that's done, we need to do some tweaks to each individual
        # compute "service" to make sure they return unique objects.
        # Bind fake_connection at definition time via a default argument;
        # a bare `lambda: fake_connection` would late-bind and both
        # computes would end up returning the last loop iteration's
        # connection.
        compute.driver._host.get_connection = lambda conn=fake_connection: conn
        self.computes[host] = compute

    # Create server
    extra_spec = {'hw:numa_nodes': '1'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)
    networks = [
        {'uuid': base.LibvirtNeutronFixture.network_1['id']},
    ]

    good_server = self._build_server(flavor_id)
    good_server['networks'] = networks
    post = {'server': good_server}

    created_server = self.api.post_server(post)
    server = self._wait_for_state_change(created_server, 'BUILD')
    self.assertEqual('ACTIVE', server['status'])

    # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
    # probably be less...dumb
    with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                    '.migrate_disk_and_power_off', return_value='{}'):
        ex = self.assertRaises(
            client.OpenStackApiException,
            self.api.post_server_action,
            server['id'], {'migrate': None})

    self.assertEqual(400, ex.response.status_code)
    self.assertIn('No valid host', six.text_type(ex))
def test_different_page_sizes(self):
    self.start_compute(
        hostname='host_a',
        host_info=fakelibvirt.HostInfo(
            kB_mem=1024000,
            mempages={
                0: fakelibvirt.create_mempages(
                    [(4, 256000), (1024, 1000)]),
            }))
    self.start_compute(
        hostname='host_b',
        host_info=fakelibvirt.HostInfo(
            kB_mem=1024000,
            mempages={
                0: fakelibvirt.create_mempages(
                    [(4, 256000), (2048, 500)]),
            }))

    extra_spec = {'hw:numa_nodes': 1,
                  'hw:cpu_policy': 'dedicated',
                  'hw:mem_page_size': 'large'}
    flavor = self._create_flavor(vcpu=2, memory_mb=512,
                                 extra_spec=extra_spec)
    server = self._build_server(
        flavor_id=flavor,
        image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
    server['networks'] = 'none'
    post = {'server': server}
    server = self.api.post_server(post)
    self._wait_for_state_change(server, 'ACTIVE')

    initial_host = self.get_host(server['id'])
    dest_host = 'host_a' if initial_host == 'host_b' else 'host_b'

    # NOTE(artom) Because we use the CastAsCallFixture, we expect the
    # MigrationPreCheckError to be bubbled up to the API as an error 500.
    self.api.api_post(
        '/servers/%s/action' % server['id'],
        {'os-migrateLive': {'host': dest_host,
                            'block_migration': 'auto',
                            'force': True}},
        check_response_status=[500])
    self._wait_for_state_change(server, 'ACTIVE')
    self._wait_for_migration_status(server, ['error'])
    self.assertEqual(initial_host, self.get_host(server['id']))
    self.assertIsNone(self._get_migration_context(server['id']))

    log_out = self.stdlog.logger.output
    self.assertIn('Migration pre-check error: '
                  'Insufficient compute resources: '
                  'Requested page size is different from current page '
                  'size.', log_out)
def _start_compute_service(self, hostname):
    fake_connection = self._get_connection(
        host_info=fakelibvirt.HostInfo(cpu_nodes=2, kB_mem=8192),
        # We want to create two pGPUs but no other PCI devices
        pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=0,
                                                num_pfs=0,
                                                num_vfs=0,
                                                num_mdevcap=2),
        hostname=hostname)

    with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                    return_value=fake_connection):
        # this method will update a self.computes dict keyed by hostname
        compute = self._start_compute(hostname)
        compute.driver._host.get_connection = lambda: fake_connection

    rp_uuid = self._get_provider_uuid_by_name(hostname)
    rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid)
    for rp in rp_uuids:
        inventory = self._get_provider_inventory(rp)
        if orc.VGPU in inventory:
            usage = self._get_provider_usages(rp)
            self.assertEqual(16, inventory[orc.VGPU]['total'])
            self.assertEqual(0, usage[orc.VGPU])

    # Since we haven't created any mdevs yet, we shouldn't find them
    self.assertEqual([], compute.driver._get_mediated_devices())
    return compute
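# NOTE: on the assertions above. With num_mdevcap=2 the fake host exposes
# two mdev-capable pGPUs, each of which appears in placement as a child
# resource provider in the compute node's tree. The loop therefore
# expects to find the VGPU inventory (16 units per pGPU, per the
# fakelibvirt defaults this test assumes) only on those child providers,
# each with zero usage until mdevs are actually created.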
def test_success(self):
    self.flags(cpu_dedicated_set='0-7', group='compute')

    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='test_compute0')

    flavor_id = self._create_flavor(extra_spec={
        'hw:cpu_realtime': 'yes',
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_realtime_mask': '^1',
    })
    server = self._build_server(flavor_id=flavor_id)

    created = self.api.post_server({'server': server})
    instance = self._wait_for_state_change(created, 'ACTIVE')
    self._delete_server(instance)
def setUp(self):
    super().setUp()

    # Launch a single libvirt-based compute service with a single NUMA
    # node.
    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=2, kB_mem=15740000)
    self.start_compute(host_info=host_info, hostname='compute1')

    # Use a flavor requesting 2 NUMA nodes that we know will always fail
    self.flavor_id = self._create_flavor(extra_spec={'hw:numa_nodes': '2'})

    # Craft a common boot-from-volume (bfv) server request for use within
    # each test
    self.volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
    self.server = {
        'name': 'test',
        'flavorRef': self.flavor_id,
        'imageRef': '',
        'networks': 'none',
        'block_device_mapping_v2': [{
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'uuid': self.volume_id,
        }],
    }
def _start_compute_service(self, hostname):
    fake_connection = self._get_connection(
        # Need a host to support creating more servers with vpmems
        host_info=fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                       cpu_cores=2, cpu_threads=2,
                                       kB_mem=15740000),
        hostname=hostname)
    self.mock_conn.return_value = fake_connection
    compute = self._start_compute(host=hostname)

    # Ensure the existing pmems are populated correctly.
    vpmems = compute.driver._vpmems_by_name
    expected_vpmems = {
        'ns_0': objects.LibvirtVPMEMDevice(
            label='4GB', name='ns_0', devpath='/dev/dax0.0',
            size=4292870144, align=2097152),
        'ns_1': objects.LibvirtVPMEMDevice(
            label='SMALL', name='ns_1', devpath='/dev/dax0.1',
            size=4292870144, align=2097152),
        'ns_2': objects.LibvirtVPMEMDevice(
            label='SMALL', name='ns_2', devpath='/dev/dax0.2',
            size=4292870144, align=2097152),
    }
    self.assertDictEqual(expected_vpmems, vpmems)

    # Ensure vpmem resources are reported correctly.
    rp_uuid = self._get_provider_uuid_by_host(compute.host)
    inventory = self._get_provider_inventory(rp_uuid)
    self.assertEqual(1, inventory['CUSTOM_PMEM_NAMESPACE_4GB']['total'])
    self.assertEqual(2, inventory['CUSTOM_PMEM_NAMESPACE_SMALL']['total'])
    return compute
def test_success(self):
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='test_compute0')

    flavor = self._create_flavor(extra_spec={
        'hw:cpu_realtime': 'yes',
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_realtime_mask': '^1',
    })
    server = self._build_server(flavor)

    created = self.api.post_server({'server': server})
    instance = self.api.get_server(created['id'])
    instance = self._wait_for_state_change(instance, 'BUILD')
    self.assertEqual('ACTIVE', instance['status'])
    self._delete_server(instance['id'])
def test_create_server_with_legacy_pinning_policy(self):
    """Create a server using the legacy 'hw:cpu_policy' extra spec.

    This should pass and result in a guest NUMA topology with pinned
    CPUs.
    """
    self.flags(cpu_dedicated_set='0-9', cpu_shared_set=None,
               group='compute')
    self.flags(vcpu_pin_set=None)

    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=5, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    }
    flavor_id = self._create_flavor(vcpu=5, extra_spec=extra_spec)
    expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'PCPU': 5}

    server = self._run_build_test(flavor_id, expected_usage=expected_usage)

    inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
    self.assertEqual(1, len(inst.numa_topology.cells))
    self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores)
def test_create_server_with_hugepages(self):
    """Create a server with huge pages.

    Configuring huge pages against a server also necessitates configuring
    a NUMA topology.
    """
    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=2,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=(1024 * 1024 * 16))  # 16 GiB
    self.mock_conn.return_value = self._get_connection(host_info=host_info)

    # create 1024 * 2 MB huge pages, and allocate the rest of the 16 GiB
    # as small pages
    for cell in host_info.numa_topology.cells:
        huge_pages = 1024
        small_pages = (host_info.kB_mem - (2048 * huge_pages)) // 4
        cell.mempages = fakelibvirt.create_mempages([
            (4, small_pages),
            (2048, huge_pages),
        ])

    extra_spec = {'hw:mem_page_size': 'large'}
    flavor_id = self._create_flavor(memory_mb=2048, extra_spec=extra_spec)
    expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2}

    server = self._run_build_test(flavor_id, expected_usage=expected_usage)

    ctx = nova_context.get_admin_context()
    inst = objects.Instance.get_by_uuid(ctx, server['id'])
    self.assertEqual(1, len(inst.numa_topology.cells))
    self.assertEqual(2048, inst.numa_topology.cells[0].pagesize)  # kB
    self.assertEqual(2048, inst.numa_topology.cells[0].memory)  # MB
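# Worked numbers for the mempages split above: kB_mem is
# 1024 * 1024 * 16 = 16777216 kB. The 1024 huge pages of 2048 kB each
# consume 2048 * 1024 = 2097152 kB, leaving 14680064 kB, i.e.
# 14680064 // 4 = 3670016 small (4 kB) pages per cell.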
def test_create_server_with_pci_dev_and_numa_fails(self):
    """This test ensures that it is not possible to allocate CPU and
    memory resources from one NUMA node and a PCI device from another.
    """
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
    fake_connection = self._get_connection(host_info, pci_info)
    self.mock_conn.return_value = fake_connection

    # boot one instance with no PCI device to "fill up" NUMA node 0
    extra_spec = {
        'hw:cpu_policy': 'dedicated',
    }
    flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)

    self._run_build_test(flavor_id)

    # now boot one with a PCI device, which should fail to boot
    extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    self._run_build_test(flavor_id, end_status='ERROR')
def test_create_server_with_pci_dev_and_numa(self):
    """Validate behavior of 'preferred' PCI NUMA policy.

    This test ensures that it *is* possible to allocate CPU and memory
    resources from one NUMA node and a PCI device from another *if* PCI
    NUMA policies are in use.
    """
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
    fake_connection = self._get_connection(host_info, pci_info)
    self.mock_conn.return_value = fake_connection

    # boot one instance with no PCI device to "fill up" NUMA node 0
    extra_spec = {
        'hw:cpu_policy': 'dedicated',
    }
    flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)

    self._run_build_test(flavor_id)

    # now boot one with a PCI device, which should succeed thanks to the
    # use of the PCI policy
    extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    self._run_build_test(flavor_id)
def test_create_server_with_pcpu_quota_fails(self):
    """Create a pinned instance on a host with PCPUs but not enough quota.

    This should fail because the quota request should fail.
    """
    self.flags(cpu_dedicated_set='0-7', cpu_shared_set=None,
               group='compute')
    self.flags(vcpu_pin_set=None)

    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {'resources:PCPU': '2'}
    flavor_id = self._create_flavor(vcpu=2, extra_spec=extra_spec)

    # Set the core quota to less than we requested
    self.api.update_quota({'cores': 1})

    # NOTE(bhagyashris): Always use host as 'compute1' so that it's
    # possible to get resource provider information for verifying
    # compute usages. This host name 'compute1' is hard coded in
    # Connection class in fakelibvirt.py.
    # TODO(stephenfin): Remove the hardcoded limit, possibly overriding
    # 'start_service' to make sure there isn't a mismatch
    self.compute = self.start_service('compute', host='compute1')

    post = {'server': self._build_server(flavor_id)}

    ex = self.assertRaises(client.OpenStackApiException,
                           self.api.post_server, post)
    self.assertEqual(403, ex.response.status_code)
def test_create_server_with_pcpu(self):
    """Create a server using an explicit 'resources:PCPU' request.

    This should pass and result in a guest NUMA topology with pinned
    CPUs.
    """
    self.flags(cpu_dedicated_set='0-7', cpu_shared_set=None,
               group='compute')
    self.flags(vcpu_pin_set=None)

    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {'resources:PCPU': '2'}
    flavor_id = self._create_flavor(vcpu=2, extra_spec=extra_spec)
    expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'PCPU': 2}

    server = self._run_build_test(flavor_id, expected_usage=expected_usage)

    ctx = nova_context.get_admin_context()
    inst = objects.Instance.get_by_uuid(ctx, server['id'])
    self.assertEqual(1, len(inst.numa_topology.cells))
    self.assertEqual(1, inst.numa_topology.cells[0].cpu_topology.cores)
    self.assertEqual(2, inst.numa_topology.cells[0].cpu_topology.threads)
def test_create_server_with_pcpu_fails(self):
    """Create a pinned instance on a host with no PCPUs.

    This should fail because we're explicitly requesting PCPUs and the
    host isn't reporting them.
    """
    self.flags(cpu_shared_set='0-9', cpu_dedicated_set=None,
               group='compute')
    self.flags(vcpu_pin_set=None)

    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=5, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    # extra spec values must be strings
    extra_spec = {'resources:PCPU': '2'}
    flavor_id = self._create_flavor(vcpu=2, extra_spec=extra_spec)

    self._run_build_test(flavor_id, end_status='ERROR',
                         filter_called_on_error=False)
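# NOTE: on filter_called_on_error=False above. The assumption here is
# that, because the host reports no PCPU inventory at all, placement
# returns no allocation candidates and scheduling fails before the
# NUMATopologyFilter ever runs; the flag tells the test helper not to
# expect a filter invocation on the failure path.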
def test_create_server_with_legacy_pinning_policy_old_configuration(self):
    """Create a server using the legacy extra spec and configuration.

    This should pass and result in a guest NUMA topology with pinned
    CPUs, though we'll still be consuming VCPUs (which would in theory be
    fixed during a later reshape).
    """
    self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
               group='compute')
    self.flags(vcpu_pin_set='0-7')

    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    }
    flavor_id = self._create_flavor(extra_spec=extra_spec)
    expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2}

    self._run_build_test(flavor_id, expected_usage=expected_usage)
def _test_create_server_with_networks(self, flavor_id, networks,
                                      end_status='ACTIVE'):
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='test_compute0')

    # Create server
    good_server = self._build_server(flavor_id=flavor_id)
    good_server['networks'] = networks
    post = {'server': good_server}

    created_server = self.api.post_server(post)
    LOG.debug("created_server: %s", created_server)
    found_server = self.api.get_server(created_server['id'])

    return self._wait_for_state_change(found_server, end_status)
def test_create_server_with_numa_topology(self):
    """Create a server with two NUMA nodes.

    This should pass and result in a guest NUMA topology with two NUMA
    nodes.
    """
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {'hw:numa_nodes': '2'}
    flavor_id = self._create_flavor(vcpu=2, extra_spec=extra_spec)
    expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2}

    server = self._run_build_test(flavor_id, expected_usage=expected_usage)

    ctx = nova_context.get_admin_context()
    inst = objects.Instance.get_by_uuid(ctx, server['id'])
    self.assertEqual(2, len(inst.numa_topology.cells))
    self.assertNotIn('cpu_topology', inst.numa_topology.cells[0])
    self.assertNotIn('cpu_topology', inst.numa_topology.cells[1])
def test_create_server_with_legacy_pinning_policy_fails(self):
    """Create a pinned instance on a host with no PCPUs.

    This should fail because we're translating the extra spec and the
    host isn't reporting the PCPUs we need.
    """
    self.flags(cpu_shared_set='0-9', cpu_dedicated_set=None,
               group='compute')
    self.flags(vcpu_pin_set=None)

    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=5, cpu_threads=2,
                                     kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    extra_spec = {
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    }
    flavor_id = self._create_flavor(vcpu=5, extra_spec=extra_spec)

    self._run_build_test(flavor_id, end_status='ERROR')
def test_rebuild_server_with_numa_inplace_fails(self):
    """Create a NUMA instance and ensure in place rebuild fails."""
    # Create a flavor consuming 2 pinned cpus with an implicit
    # numa topology of 1 virtual numa node.
    extra_spec = {'hw:cpu_policy': 'dedicated'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    # cpu_cores is set to 2 to ensure that we have enough space
    # to boot the vm but not enough space to rebuild
    # by doubling the resource use during scheduling.
    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=2, kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # This should succeed as the numa constraints do not change.
    self._rebuild_server(server, self.image_ref_1)
def test_rebuild_server_with_different_numa_topology_fails(self):
    """Create a NUMA instance and ensure in-place rebuild fails."""
    # Create a flavor consuming 2 pinned cpus with an implicit
    # numa topology of 1 virtual numa node.
    extra_spec = {'hw:cpu_policy': 'dedicated'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=4, kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # The original vm had an implicit numa topology of 1 virtual numa
    # node so we alter the requested numa topology in image_ref_1 to
    # request 2 virtual numa nodes.
    ctx = nova_context.get_admin_context()
    image_meta = {'properties': {'hw_numa_nodes': 2}}
    self.fake_image_service.update(ctx, self.image_ref_1, image_meta)

    # NOTE(sean-k-mooney): this should fail because rebuild uses noop
    # claims therefore it is not allowed for the NUMA topology or
    # resource usage to change during a rebuild.
    ex = self.assertRaises(client.OpenStackApiException,
                           self._rebuild_server,
                           server, self.image_ref_1)
    self.assertEqual(400, ex.response.status_code)
    self.assertIn("An instance's NUMA topology cannot be changed",
                  six.text_type(ex))
def test_rebuild_server_with_numa(self):
    """Create a NUMA instance and ensure it can be rebuilt."""
    # Create a flavor consuming 2 pinned cpus with an implicit
    # numa topology of 1 virtual numa node.
    extra_spec = {'hw:cpu_policy': 'dedicated'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    # Create a host with 4 physical cpus to allow rebuild leveraging
    # the free space to ensure the numa topology filter does not
    # eliminate the host.
    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=4, kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # this should succeed as the NUMA topology has not changed
    # and we have enough resources on the host. We rebuild with
    # a different image to force the rebuild to query the scheduler
    # to validate the host.
    self._rebuild_server(server, self.image_ref_1)
def _get_connection(
    self, host_info=None, pci_info=None, mdev_info=None,
    libvirt_version=None, qemu_version=None, hostname=None,
):
    if not host_info:
        host_info = fakelibvirt.HostInfo(
            cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
            kB_mem=(16 * units.Gi) // units.Ki)

    # sanity check
    self.assertGreater(
        16, host_info.cpus,
        "Host.get_online_cpus is only accounting for 16 CPUs but you're "
        "requesting %d; change the mock or your test" % host_info.cpus)

    libvirt_version = libvirt_version or fakelibvirt.FAKE_LIBVIRT_VERSION
    qemu_version = qemu_version or fakelibvirt.FAKE_QEMU_VERSION

    fake_connection = fakelibvirt.Connection(
        'qemu:///system',
        version=libvirt_version,
        hv_version=qemu_version,
        host_info=host_info,
        pci_info=pci_info,
        mdev_info=mdev_info,
        hostname=hostname)
    return fake_connection
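# For the default HostInfo above, host_info.cpus works out to
# cpu_nodes * cpu_sockets * cpu_cores * cpu_threads = 2 * 1 * 2 * 2 = 8,
# which comfortably passes the 16-CPU sanity check; the check only trips
# when a caller supplies its own, larger topology.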
def test_insufficient_resources(self):
    self.start_computes({
        'host_a': fakelibvirt.HostInfo(
            cpu_nodes=1, cpu_sockets=3, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000),
        'host_b': fakelibvirt.HostInfo(
            cpu_nodes=2, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
            kB_mem=10740000),
    })

    extra_spec = {'hw:numa_nodes': 1,
                  'hw:cpu_policy': 'dedicated'}
    flavor = self._create_flavor(vcpu=3, extra_spec=extra_spec)
    server = self._build_server(
        flavor_id=flavor,
        image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
    server['networks'] = 'none'
    post = {'server': server}
    server = self.api.post_server(post)
    self._wait_for_state_change(server, 'ACTIVE')
    self.assertEqual('host_a', self.get_host(server['id']))

    # NOTE(artom) Because we use the CastAsCall fixture, we expect the
    # MigrationPreCheckError to be bubbled up to the API as an error 500.
    # TODO(artom) Stop using CastAsCall to make it more realistic.
    self.api.api_post(
        '/servers/%s/action' % server['id'],
        {'os-migrateLive': {'host': 'host_b',
                            'block_migration': 'auto',
                            'force': True}},
        check_response_status=[500])
    self._wait_for_state_change(server, 'ACTIVE')
    self._wait_for_migration_status(server, ['error'])
    self.assertIsNone(self._get_migration_context(server['id']))
    self.assertEqual('host_a', self.get_host(server['id']))

    log_out = self.stdlog.logger.output
    self.assertIn('Migration pre-check error: '
                  'Insufficient compute resources: '
                  'Requested instance NUMA topology cannot fit',
                  log_out)
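# Why host_b cannot accept the instance: host_a exposes a single NUMA
# node with 3 * 1 * 1 = 3 pCPUs, so the 3-vCPU, single-NUMA-node pinned
# flavor fits there. host_b has 2 * 2 * 1 * 1 = 4 pCPUs, but they are
# split across 2 NUMA nodes (2 pCPUs each), so no single node can hold
# all 3 pinned vCPUs and the live migration pre-check fails.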
def start_computes(self, host_info_dict=None, save_rp_uuids=False):
    """Start compute services.

    The started services will be saved in self.computes, keyed by
    hostname.

    :param host_info_dict: A hostname -> fakelibvirt.HostInfo object
        dictionary representing the libvirt HostInfo of each compute
        host. If None, the default is to start 2 computes, named
        test_compute0 and test_compute1, with 2 NUMA nodes, 2 cores per
        node, 2 threads per core, and 16GB of RAM.
    :param save_rp_uuids: If True, save the resource provider UUID of
        each started compute in self.compute_rp_uuids, keyed by hostname.
    """
    if host_info_dict is None:
        host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
        host_info_dict = {
            'test_compute0': host_info,
            'test_compute1': host_info,
        }

    def start_compute(host, host_info):
        fake_connection = self._get_connection(host_info=host_info,
                                               hostname=host)
        # This is fun. Firstly we need to do a global'ish mock so we can
        # actually start the service.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            compute = self.start_service('compute', host=host)
            # Once that's done, we need to tweak the compute "service" to
            # make sure it returns unique objects. We do this inside the
            # mock context to avoid a small window between the end of the
            # context and the tweaking where get_connection would revert
            # to being an autospec mock.
            compute.driver._host.get_connection = lambda: fake_connection
        return compute

    self.computes = {}
    self.compute_rp_uuids = {}
    for host, host_info in host_info_dict.items():
        # NOTE(artom) A `lambda: foo` construct returns the value of foo
        # at call-time, so if the value of foo changes with every
        # iteration of a loop, every call to the lambda will return a
        # different value of foo. Because that's not what we want in our
        # lambda further up, we can't put it directly in the for loop,
        # and need to introduce the start_compute function to create a
        # scope in which host and host_info do not change with every
        # iteration of the for loop.
        self.computes[host] = start_compute(host, host_info)

        if save_rp_uuids:
            self.compute_rp_uuids[host] = self.placement_api.get(
                '/resource_providers?name=%s' % host).body[
                'resource_providers'][0]['uuid']
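# A minimal sketch of the late-binding gotcha the NOTE(artom) comment
# above describes (illustration only, not part of the test suite):
#
#   callbacks = [lambda: host for host in ['a', 'b']]
#   [f() for f in callbacks]  # -> ['b', 'b'], not ['a', 'b']
#
# Each lambda closes over the loop variable itself, not its value at
# definition time. Wrapping the loop body in a helper function
# (start_compute above) gives each iteration its own scope, so the
# closure captures the intended connection.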
def test_create_server_with_numa_fails(self):
    host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                     cpu_cores=2, kB_mem=15740000)
    fake_connection = self._get_connection(host_info=host_info)
    self.mock_conn.return_value = fake_connection

    # Create a flavor requesting more NUMA nodes than the host has
    extra_spec = {'hw:numa_nodes': '2'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    self._run_build_test(flavor_id, end_status='ERROR')
def test_create_server_with_PF(self):
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo()
    fake_connection = self._get_connection(host_info, pci_info)
    self.mock_conn.return_value = fake_connection

    # Create a flavor
    extra_spec = {"pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    self._run_build_test(flavor_id)
def test_get_server_diagnostics_server_with_VF(self):
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo()
    fake_connection = self._get_connection(host_info, pci_info)
    self.mock_conn.return_value = fake_connection

    # Create a flavor
    extra_spec = {"pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    if not self.compute_started:
        self.compute = self.start_service('compute',
                                          host='test_compute0')
        self.compute_started = True

    # Create server
    good_server = self._build_server(
        image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
        flavor_id=flavor_id)
    good_server['networks'] = [
        {'uuid': base.LibvirtNeutronFixture.network_1['id']},
        {'uuid': base.LibvirtNeutronFixture.network_4['id']},
    ]
    post = {'server': good_server}
    created_server = self.api.post_server(post)
    self._wait_for_state_change(created_server, 'ACTIVE')

    diagnostics = self.api.get_server_diagnostics(created_server['id'])

    self.assertEqual(
        base.LibvirtNeutronFixture.network_1_port_2['mac_address'],
        diagnostics['nic_details'][0]['mac_address'])
    self.assertEqual(
        base.LibvirtNeutronFixture.network_4_port_1['mac_address'],
        diagnostics['nic_details'][1]['mac_address'])

    self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])
    self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])