def test_cold_migrate_with_physnet_fails(self):
    """Cold migration fails when no other host can hold the physnet.

    Host 1 has two NUMA nodes; host 2 has only one, and the desired
    physnet is reported on the (missing) second node, so the migration
    must be rejected with 'No valid host'.
    """
    host_infos = [
        # host 1 has room on both nodes
        fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                 cpu_cores=2, cpu_threads=2,
                                 kB_mem=15740000),
        # host 2 has no second node, where the desired physnet is
        # reported to be attached
        fakelibvirt.NUMAHostInfo(cpu_nodes=1, cpu_sockets=1,
                                 cpu_cores=1, cpu_threads=1,
                                 kB_mem=15740000),
    ]

    # Start services
    self.computes = {}
    for host in ['test_compute0', 'test_compute1']:
        host_info = host_infos.pop(0)
        fake_connection = self._get_connection(host_info=host_info)
        # NOTE: bind the loop variable as a lambda default; a bare
        # closure (lambda: host) late-binds, leaving *both*
        # connections reporting the hostname of the final iteration.
        fake_connection.getHostname = lambda host=host: host

        # This is fun. Firstly we need to do a global'ish mock so we can
        # actually start the service.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            compute = self.start_service('compute', host=host)

        # Once that's done, we need to do some tweaks to each individual
        # compute "service" to make sure they return unique objects.
        # Bind the connection as a default too, for the same
        # late-binding reason as above.
        compute.driver._host.get_connection = (
            lambda conn=fake_connection: conn)
        self.computes[host] = compute

    # Create server
    extra_spec = {'hw:numa_nodes': '1'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    networks = [
        {'uuid': NUMAAffinityNeutronFixture.network_1['id']},
    ]

    good_server = self._build_server(flavor_id)
    good_server['networks'] = networks
    post = {'server': good_server}

    created_server = self.api.post_server(post)
    server = self._wait_for_state_change(created_server, 'BUILD')
    self.assertEqual('ACTIVE', server['status'])

    # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
    # probably be less...dumb
    with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                    '.migrate_disk_and_power_off', return_value='{}'):
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               server['id'], {'migrate': None})

    self.assertEqual(400, ex.response.status_code)
    self.assertIn('No valid host', six.text_type(ex))
def test_rebuild_server_with_different_numa_topology_fails(self):
    """Create a NUMA instance and ensure inplace rebuild fails.
    """
    # A flavor with pinned CPUs implies a single virtual NUMA node.
    flavor_id = self._create_flavor(
        extra_spec={'hw:cpu_policy': 'dedicated'})

    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=4, kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # The original vm had an implicit numa topology of 1 virtual numa
    # node, so alter the requested numa topology in image_ref_1 to
    # request 2 virtual numa nodes instead.
    ctx = nova_context.get_admin_context()
    self.fake_image_service.update(
        ctx, self.image_ref_1, {'properties': {'hw_numa_nodes': 2}})

    # NOTE(sean-k-mooney): this should fail because rebuild uses noop
    # claims therefore it is not allowed for the NUMA topology or
    # resource usage to change during a rebuild.
    ex = self.assertRaises(client.OpenStackApiException,
                           self._rebuild_server,
                           server, self.image_ref_1)
    self.assertEqual(400, ex.response.status_code)
    self.assertIn("An instance's NUMA topology cannot be changed",
                  six.text_type(ex))
def test_create_server_with_pci_dev_and_numa_fails(self, img_mock):
    """This test ensures that it is not possible to allocated CPU and
       memory resources from one NUMA node and a PCI device from
       another.
    """
    host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1,
                                                   numa_node=0)
    fake_connection = self._get_connection(host_info, pci_info)

    # Create a flavor. The first (plain pinned) instance "fills up"
    # NUMA node 0; the second one requests a PF that also sits on
    # node 0, so it cannot be placed.
    # NOTE: fixed typo 'hw:numa_node' -> 'hw:numa_nodes'; the
    # misspelled key is not a recognized extra spec and was being
    # silently ignored.
    extra_spec_vm = {'hw:cpu_policy': 'dedicated',
                     'hw:numa_nodes': '1'}
    extra_spec = {'pci_passthrough:alias': '%s:1' % self.pfs_alias_name,
                  'hw:numa_nodes': '1',
                  'hw:cpu_policy': 'dedicated',
                  'hw:cpu_thread_policy': 'prefer'}
    vm_flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec_vm)
    pf_flavor_id = self._create_flavor(extra_spec=extra_spec)

    host_pass_mock = self._get_pci_passthrough_filter_spy()
    with test.nested(
        mock.patch('nova.virt.libvirt.host.Host.get_connection',
                   return_value=fake_connection),
        mock.patch('nova.scheduler.filters'
                   '.pci_passthrough_filter.PciPassthroughFilter'
                   '.host_passes',
                   side_effect=host_pass_mock)) as (conn_mock,
                                                    filter_mock):
        vm_server = self._run_build_test(vm_flavor_id, filter_mock)
        pf_server = self._run_build_test(pf_flavor_id, filter_mock,
                                         end_status='ERROR')
        self._delete_server(vm_server['id'])
        self._delete_server(pf_server['id'])
def test_create_server_with_pinning(self):
    """Boot a pinned server and verify its stored NUMA topology."""
    # A single-node host with ten logical CPUs (5 cores x 2 threads).
    fake_connection = self._get_connection(
        host_info=fakelibvirt.NUMAHostInfo(
            cpu_nodes=1, cpu_sockets=1, cpu_cores=5, cpu_threads=2,
            kB_mem=15740000))

    # Create a flavor
    flavor_id = self._create_flavor(vcpu=5, extra_spec={
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    })

    host_pass_mock = self._get_topology_filter_spy()
    with test.nested(
        mock.patch('nova.virt.libvirt.host.Host.get_connection',
                   return_value=fake_connection),
        mock.patch('nova.scheduler.filters'
                   '.numa_topology_filter.NUMATopologyFilter.host_passes',
                   side_effect=host_pass_mock)) as (conn_mock,
                                                    filter_mock):
        server = self._run_build_test(flavor_id, filter_mock)

        # Expect a single virtual NUMA node spanning all five cores.
        ctx = nova_context.get_admin_context()
        inst = objects.Instance.get_by_uuid(ctx, server['id'])
        self.assertEqual(1, len(inst.numa_topology.cells))
        self.assertEqual(
            5, inst.numa_topology.cells[0].cpu_topology.cores)
def test_create_server_with_pci_dev_and_numa_fails(self):
    """This test ensures that it is not possible to allocated CPU and
       memory resources from one NUMA node and a PCI device from
       another.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # boot one instance with no PCI device to "fill up" NUMA node 0
    extra_spec = {
        'hw:cpu_policy': 'dedicated',
    }
    self._run_build_test(
        self._create_flavor(vcpu=4, extra_spec=extra_spec))

    # now boot one with a PCI device, which should fail to boot
    extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
    self._run_build_test(
        self._create_flavor(extra_spec=extra_spec), end_status='ERROR')
def test_rebuild_server_with_numa(self):
    """Create a NUMA instance and ensure it can be rebuilt.
    """
    # Create a flavor consuming 2 pinned cpus with an implicit
    # numa topology of 1 virtual numa node.
    flavor_id = self._create_flavor(
        extra_spec={'hw:cpu_policy': 'dedicated'})

    # Create a host with 4 physical cpus to allow rebuild leveraging
    # the free space to ensure the numa topology filter does not
    # eliminate the host.
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=1, cpu_sockets=1, cpu_cores=4, kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # this should succeed as the NUMA topology has not changed
    # and we have enough resources on the host. We rebuild with
    # a different image to force the rebuild to query the scheduler
    # to validate the host.
    self._rebuild_server(server, self.image_ref_1)
def test_success(self):
    """A realtime-flavored instance boots successfully."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    self.compute = self.start_service('compute', host='test_compute0')

    flavor = self._create_flavor(extra_spec={
        'hw:cpu_realtime': 'yes',
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_realtime_mask': '^1',
    })
    server = self._build_server(flavor)
    created = self.api.post_server({'server': server})

    instance = self.api.get_server(created['id'])
    instance = self._wait_for_state_change(instance, 'BUILD')

    self.assertEqual('ACTIVE', instance['status'])
    self._delete_server(instance['id'])
def test_rebuild_server_with_numa_inplace_fails(self):
    """Create a NUMA instance and ensure in place rebuild fails.
    """
    # Create a flavor consuming 2 pinned cpus with an implicit
    # numa topology of 1 virtual numa node.
    flavor_id = self._create_flavor(
        extra_spec={'hw:cpu_policy': 'dedicated'})

    # cpu_cores is set to 2 to ensure that we have enough space
    # to boot the vm but not enough space to rebuild
    # by doubling the resource use during scheduling.
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=1, cpu_sockets=1, cpu_cores=2, kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    self.compute = self.start_service('compute', host='compute1')

    server = self._create_active_server(
        server_args={"flavorRef": flavor_id})

    # This should succeed as the numa constraints do not change.
    # NOTE(review): the method name and the sizing comment above both
    # say the rebuild should *fail*, yet no failure is asserted here.
    # Confirm which behavior is intended and either rename the test or
    # assert the expected failure.
    self._rebuild_server(server, self.image_ref_1)
def test_create_server_with_VF_no_PF(self, img_mock):
    """Claiming a PF fails after one of its VFs has been allocated."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1,
                                                   num_vfs=4)
    fake_connection = self._get_connection(host_info, pci_info)

    # Create a flavor
    pf_flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.pfs_alias_name})
    vf_flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.vfs_alias_name})

    host_pass_mock = self._get_pci_passthrough_filter_spy()
    with test.nested(
        mock.patch('nova.virt.libvirt.host.Host.get_connection',
                   return_value=fake_connection),
        mock.patch('nova.scheduler.filters'
                   '.pci_passthrough_filter.PciPassthroughFilter'
                   '.host_passes',
                   side_effect=host_pass_mock)) as (conn_mock,
                                                    filter_mock):
        # Boot the VF instance first; the PF boot is then expected to
        # end in ERROR.
        vf_server = self._run_build_test(vf_flavor_id, filter_mock)
        pf_server = self._run_build_test(pf_flavor_id, filter_mock,
                                         end_status='ERROR')
        self._delete_server(pf_server['id'])
        self._delete_server(vf_server['id'])
def test_success(self):
    """A realtime-flavored instance boots successfully."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    fake_connection = fakelibvirt.Connection(
        'qemu:///system',
        version=fakelibvirt.FAKE_LIBVIRT_VERSION,
        hv_version=fakelibvirt.FAKE_QEMU_VERSION,
        host_info=host_info)

    with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                    return_value=fake_connection):
        self.compute = self.start_service('compute',
                                          host='test_compute0')

        flavor = self._create_flavor(extra_spec={
            'hw:cpu_realtime': 'yes',
            'hw:cpu_policy': 'dedicated',
            'hw:cpu_realtime_mask': '^1',
        })
        server = self._build_server(flavor)
        created = self.api.post_server({'server': server})

        instance = self.api.get_server(created['id'])
        instance = self._wait_for_state_change(instance, 'BUILD')

        self.assertEqual('ACTIVE', instance['status'])
        self._delete_server(instance['id'])
def test_create_server_with_pci_dev_and_numa(self, img_mock):
    """Verifies that an instance can be booted with cpu pinning and
    with an assigned pci device.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    # Report a PF on host NUMA node 1.
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1,
                                                   numa_node=1)
    fake_connection = self._get_connection(host_info, pci_info)

    # Create a flavor
    flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.pfs_alias_name,
        'hw:numa_nodes': '1',
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    })

    host_pass_mock = self._get_pci_passthrough_filter_spy()
    with test.nested(
        mock.patch('nova.virt.libvirt.host.Host.get_connection',
                   return_value=fake_connection),
        mock.patch('nova.scheduler.filters'
                   '.pci_passthrough_filter.PciPassthroughFilter'
                   '.host_passes',
                   side_effect=host_pass_mock)) as (conn_mock,
                                                    filter_mock):
        pf_server = self._run_build_test(flavor_id, filter_mock)
        self._delete_server(pf_server['id'])
def test_invalid_libvirt_version(self):
    """A realtime request errors out on an old libvirt/qemu."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    # Advertise libvirt 1.2.7 / qemu 2.1.0 — presumably below the
    # minimum required for realtime support.
    fake_connection = fakelibvirt.Connection('qemu:///system',
                                             version=1002007,
                                             hv_version=2001000,
                                             host_info=host_info)

    with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                    return_value=fake_connection):
        self.compute = self.start_service('compute',
                                          host='test_compute0')
        fake_network.set_stub_network_methods(self)

        flavor = self._create_flavor(extra_spec={
            'hw:cpu_realtime': 'yes',
            'hw:cpu_policy': 'dedicated',
            'hw:cpu_realtime_mask': '^1',
        })
        server = self._build_server(flavor)
        created = self.api.post_server({'server': server})

        instance = self.api.get_server(created['id'])
        instance = self._wait_for_state_change(instance, 'BUILD')

        # Realtime policy not supported by hypervisor
        self.assertEqual('ERROR', instance['status'])
        self._delete_server(instance['id'])
def test_create_server_with_pci_dev_and_numa(self):
    """Validate behavior of 'preferred' PCI NUMA policy.

    This test ensures that it *is* possible to allocate CPU and
    memory resources from one NUMA node and a PCI device from another
    *if* PCI NUMA policies are in use.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # boot one instance with no PCI device to "fill up" NUMA node 0
    extra_spec = {
        'hw:cpu_policy': 'dedicated',
    }
    self._run_build_test(
        self._create_flavor(vcpu=4, extra_spec=extra_spec))

    # now boot one with a PCI device, which should succeed thanks to
    # the use of the PCI policy
    extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
    self._run_build_test(self._create_flavor(extra_spec=extra_spec))
def test_create_server_with_numa_fails(self):
    """Requesting two NUMA nodes on a one-node host must fail."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=1, cpu_sockets=1, cpu_cores=2, kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    # Create a flavor asking for more virtual NUMA nodes than the
    # host provides.
    flavor_id = self._create_flavor(extra_spec={'hw:numa_nodes': '2'})

    self._run_build_test(flavor_id, end_status='ERROR')
def test_create_server_with_PF(self, img_mock):
    """An instance can be booted with a physical function assigned."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # Create a flavor
    flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.pfs_alias_name})

    self._run_build_test(flavor_id)
def test_get_server_diagnostics_server_with_VF(self):
    """Check NIC diagnostics for a server with two attached ports."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo()
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # Create a flavor
    flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME})

    if not self.compute_started:
        self.compute = self.start_service('compute',
                                          host='test_compute0')
        self.compute_started = True

    # Create server
    good_server = self._build_server(
        flavor_id, '155d900f-4e14-4e4c-a73d-069cbf4541e6')
    good_server['networks'] = [
        {'uuid': base.LibvirtNeutronFixture.network_1['id']},
        {'uuid': base.LibvirtNeutronFixture.network_4['id']},
    ]

    created_server = self.api.post_server({'server': good_server})
    self._wait_for_state_change(created_server, 'BUILD')

    diagnostics = self.api.get_server_diagnostics(
        created_server['id'])

    # Only the first NIC is expected to report traffic counters.
    self.assertEqual(
        base.LibvirtNeutronFixture.network_1_port_2['mac_address'],
        diagnostics['nic_details'][0]['mac_address'])
    self.assertEqual(
        base.LibvirtNeutronFixture.network_4_port_1['mac_address'],
        diagnostics['nic_details'][1]['mac_address'])
    self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])
    self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
def test_create_server_with_VF_no_PF(self, img_mock):
    """Claiming a PF fails after one of its VFs has been allocated."""
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=4)
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # Create a flavor
    flavor_id_pfs = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME})
    flavor_id_vfs = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME})

    # Boot the VF instance first; the PF boot is then expected to end
    # in ERROR.
    self._run_build_test(flavor_id_vfs)
    self._run_build_test(flavor_id_pfs, end_status='ERROR')
def test_create_server_with_numa_fails(self):
    """Requesting two NUMA nodes on a one-node host must fail."""
    fake_connection = self._get_connection(
        host_info=fakelibvirt.NUMAHostInfo(
            cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
            kB_mem=15740000))

    # Create a flavor asking for more virtual NUMA nodes than the
    # host provides.
    flavor_id = self._create_flavor(extra_spec={'hw:numa_nodes': '2'})

    host_pass_mock = self._get_topology_filter_spy()
    with test.nested(
        mock.patch('nova.virt.libvirt.host.Host.get_connection',
                   return_value=fake_connection),
        mock.patch('nova.scheduler.filters'
                   '.numa_topology_filter.NUMATopologyFilter.host_passes',
                   side_effect=host_pass_mock)) as (conn_mock,
                                                    filter_mock):
        self._run_build_test(flavor_id, filter_mock,
                             end_status='ERROR')
def _test_create_server_with_networks(self, flavor_id, networks):
    """Boot a server attached to ``networks`` and return its state.

    Helper for network-affinity tests: starts a compute backed by a
    two-node NUMA host, creates the server and waits for it to leave
    the BUILD state.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    self.compute = self.start_service('compute', host='test_compute0')

    # Create server
    good_server = self._build_server(flavor_id)
    good_server['networks'] = networks

    created_server = self.api.post_server({'server': good_server})
    LOG.debug("created_server: %s", created_server)
    found_server = self.api.get_server(created_server['id'])

    return self._wait_for_state_change(found_server, 'BUILD')
def test_create_server_with_pci_dev_and_numa(self, img_mock):
    """Verifies that an instance can be booted with cpu pinning and
    with an assigned pci device.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    # Report a PF on host NUMA node 1.
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1,
                                                   numa_node=1)
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # Create a flavor
    flavor_id = self._create_flavor(extra_spec={
        "pci_passthrough:alias": "%s:1" % self.pfs_alias_name,
        'hw:numa_nodes': '1',
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    })

    self._run_build_test(flavor_id)
def test_create_server_with_pinning(self):
    """Boot a pinned server and verify its stored NUMA topology."""
    # A single-node host with ten logical CPUs (5 cores x 2 threads).
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=1, cpu_sockets=1, cpu_cores=5, cpu_threads=2,
        kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(
        host_info=host_info)

    # Create a flavor
    flavor_id = self._create_flavor(vcpu=5, extra_spec={
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    })

    server = self._run_build_test(flavor_id)

    # Expect a single virtual NUMA node spanning all five cores.
    ctx = nova_context.get_admin_context()
    inst = objects.Instance.get_by_uuid(ctx, server['id'])
    self.assertEqual(1, len(inst.numa_topology.cells))
    self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores)
def test_create_server_with_pci_dev_and_numa(self):
    """Verifies that an instance can be booted with cpu pinning and
    with an assigned pci device.
    """
    host_info = fakelibvirt.NUMAHostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
        kB_mem=15740000)
    # Report a PCI device on host NUMA node 1.
    pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
    self.mock_conn.return_value = self._get_connection(
        host_info, pci_info)

    # create a flavor
    flavor_id = self._create_flavor(extra_spec={
        'hw:cpu_policy': 'dedicated',
        'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
    })

    self._run_build_test(flavor_id)
def test_create_server_with_pci_dev_and_numa_fails(self, img_mock):
    """This test ensures that it is not possible to allocated CPU and
       memory resources from one NUMA node and a PCI device from
       another.
    """
    host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
    pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1,
                                                   numa_node=0)
    fake_connection = self._get_connection(host_info, pci_info)
    self.mock_conn.return_value = fake_connection

    # Create a flavor. The first (plain pinned) instance "fills up"
    # NUMA node 0; the second one requests a PF that also sits on
    # node 0, so it cannot be placed.
    # NOTE: fixed typo 'hw:numa_node' -> 'hw:numa_nodes'; the
    # misspelled key is not a recognized extra spec and was being
    # silently ignored.
    extra_spec_vm = {'hw:cpu_policy': 'dedicated',
                     'hw:numa_nodes': '1'}
    extra_spec = {'pci_passthrough:alias': '%s:1' % self.pfs_alias_name,
                  'hw:numa_nodes': '1',
                  'hw:cpu_policy': 'dedicated',
                  'hw:cpu_thread_policy': 'prefer'}
    vm_flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec_vm)
    pf_flavor_id = self._create_flavor(extra_spec=extra_spec)

    self._run_build_test(vm_flavor_id)
    self._run_build_test(pf_flavor_id, end_status='ERROR')
def test_cold_migrate_with_physnet(self):
    """Cold-migrate an instance with a physnet requirement and check
    that the scheduler filter received the network metadata.
    """
    host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)

    # Start services
    self.computes = {}
    for host in ['test_compute0', 'test_compute1']:
        fake_connection = self._get_connection(host_info=host_info)
        # NOTE: bind the loop variable as a lambda default; a bare
        # closure (lambda: host) late-binds, leaving *both*
        # connections reporting the hostname of the final iteration.
        fake_connection.getHostname = lambda host=host: host

        # This is fun. Firstly we need to do a global'ish mock so we can
        # actually start the service.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            compute = self.start_service('compute', host=host)

        # Once that's done, we need to do some tweaks to each individual
        # compute "service" to make sure they return unique objects.
        # Bind the connection as a default too, for the same
        # late-binding reason as above.
        compute.driver._host.get_connection = (
            lambda conn=fake_connection: conn)
        self.computes[host] = compute

    # Create server
    extra_spec = {'hw:numa_nodes': '1'}
    flavor_id = self._create_flavor(extra_spec=extra_spec)

    networks = [
        {'uuid': base.LibvirtNeutronFixture.network_1['id']},
    ]

    good_server = self._build_server(flavor_id)
    good_server['networks'] = networks
    post = {'server': good_server}

    created_server = self.api.post_server(post)
    server = self._wait_for_state_change(created_server, 'BUILD')
    self.assertEqual('ACTIVE', server['status'])

    original_host = server['OS-EXT-SRV-ATTR:host']

    # We reset mock_filter because we want to ensure it's called as
    # part of the *migration*
    self.mock_filter.reset_mock()
    self.assertEqual(0, len(self.mock_filter.call_args_list))

    # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
    # probably be less...dumb
    with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                    '.migrate_disk_and_power_off', return_value='{}'):
        self.api.post_server_action(server['id'], {'migrate': None})
        server = self._wait_for_state_change(created_server,
                                             'VERIFY_RESIZE')

    # We don't bother confirming the resize as we expect this to have
    # landed and all we want to know is whether the filter was correct
    self.assertNotEqual(original_host, server['OS-EXT-SRV-ATTR:host'])

    # The filter must have run exactly once, positionally, and must
    # have seen the physnet in the request spec's network metadata.
    self.assertEqual(1, len(self.mock_filter.call_args_list))
    args, kwargs = self.mock_filter.call_args_list[0]
    self.assertEqual(2, len(args))
    self.assertEqual({}, kwargs)
    network_metadata = args[1].network_metadata
    self.assertIsNotNone(network_metadata)
    self.assertEqual(set(['foo']), network_metadata.physnets)