Example #1
    def test_live_migrate_server_with_pci(self):
        """Live migrate an instance with a PCI passthrough device.

        This should fail because it's not possible to live migrate an instance
        with a PCI passthrough device, even if it's an SR-IOV VF.
        """

        # start two compute services
        self.start_compute(hostname='test_compute0',
                           pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
        self.start_compute(hostname='test_compute1',
                           pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))

        # create a server
        extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        server = self._create_server(flavor_id=flavor_id, networks='none')

        # now live migrate that server
        ex = self.assertRaises(client.OpenStackApiException,
                               self._live_migrate, server, 'completed')
        # NOTE(stephenfin): this wouldn't happen in a real deployment since
        # live migration is a cast, but since we are using CastAsCall this will
        # bubble to the API
        self.assertEqual(500, ex.response.status_code)
        self.assertIn('NoValidHost', str(ex))
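The `pci_passthrough:alias` extra spec used throughout these examples follows the `<alias_name>:<count>` convention. As a minimal illustration of that syntax only (the helper below is hypothetical and not part of the test base classes):

def make_pci_alias_spec(alias_name, count=1):
    # Hypothetical helper: build the flavor extra spec requesting `count`
    # PCI devices matching the configured alias named `alias_name`.
    return {'pci_passthrough:alias': f'{alias_name}:{count}'}

# e.g. make_pci_alias_spec('a1') == {'pci_passthrough:alias': 'a1:1'}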
Example #2
    def test_create_server_with_neutron(self):
        """Create an instance using a neutron-provisioned SR-IOV VIF."""

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)

        orig_create = nova.virt.libvirt.guest.Guest.create

        def fake_create(cls, xml, host):
            tree = etree.fromstring(xml)
            elem = tree.find('./devices/interface/source/address')

            # compare address
            expected = ('0x81', '0x00', '0x2')
            actual = (
                elem.get('bus'),
                elem.get('slot'),
                elem.get('function'),
            )
            self.assertEqual(expected, actual)

            return orig_create(xml, host)

        self.stub_out(
            'nova.virt.libvirt.guest.Guest.create',
            fake_create,
        )

        self.start_compute(pci_info=pci_info)

        # create the port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # ensure the binding details are currently unset
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'], )['port']
        self.assertNotIn('binding:profile', port)

        # create a server using the VF via neutron
        flavor_id = self._create_flavor()
        self._create_server(
            flavor_id=flavor_id,
            networks=[
                {
                    'port': base.LibvirtNeutronFixture.network_4_port_1['id']
                },
            ],
        )

        # ensure the binding details sent to "neutron" were correct
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'], )['port']
        self.assertIn('binding:profile', port)
        self.assertEqual(
            {
                'pci_vendor_info': '8086:1515',
                'pci_slot': '0000:81:00.2',
                'physical_network': 'physnet4',
            },
            port['binding:profile'],
        )
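The address attributes asserted in `fake_create` ('0x81', '0x00', '0x2') describe the same device that neutron later reports as `pci_slot` '0000:81:00.2'. A standalone sketch of that conversion, assuming the usual hex-encoded domain/bus/slot/function attributes on the `<address>` element (it uses the stdlib ElementTree rather than the lxml `etree` used by the test):

import xml.etree.ElementTree as ET

def pci_slot_from_interface_xml(xml):
    # Find the <source><address> of the first hostdev-style interface and
    # render it in the canonical DDDD:BB:SS.F form used by the
    # binding:profile 'pci_slot' field.
    elem = ET.fromstring(xml).find('./devices/interface/source/address')
    return '%04x:%02x:%02x.%x' % tuple(
        int(elem.get(attr), 16)
        for attr in ('domain', 'bus', 'slot', 'function'))

xml = """<domain><devices><interface type='hostdev'><source>
  <address type='pci' domain='0x0000' bus='0x81' slot='0x00' function='0x2'/>
</source></interface></devices></domain>"""
assert pci_slot_from_interface_xml(xml) == '0000:81:00.2'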
Example #3
 def _start_compute_service(self, hostname):
     fake_connection = self._get_connection(
         host_info=fakelibvirt.HostInfo(cpu_nodes=2, kB_mem=8192),
         # We want to create two pGPUs but no other PCI devices
         pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=0,
                                                 num_pfs=0,
                                                 num_vfs=0,
                                                 num_mdevcap=2),
         hostname=hostname)
     with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                     return_value=fake_connection):
         # this method will update a self.computes dict keyed by hostname
         compute = self._start_compute(hostname)
         compute.driver._host.get_connection = lambda: fake_connection
     rp_uuid = self._get_provider_uuid_by_name(hostname)
     rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid)
     for rp in rp_uuids:
         inventory = self._get_provider_inventory(rp)
         if orc.VGPU in inventory:
             usage = self._get_provider_usages(rp)
             self.assertEqual(16, inventory[orc.VGPU]['total'])
             self.assertEqual(0, usage[orc.VGPU])
     # Since we haven't created any mdevs yet, we shouldn't find them
     self.assertEqual([], compute.driver._get_mediated_devices())
     return compute
Example #4
    def test_create_server_with_pci_dev_and_numa_fails(self):
        """This test ensures that it is not possible to allocated CPU and
           memory resources from one NUMA node and a PCI device from another.
        """

        host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                         cpu_sockets=1,
                                         cpu_cores=2,
                                         cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
        }
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)

        self._run_build_test(flavor_id)

        # now boot one with a PCI device, which should fail to boot
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._run_build_test(flavor_id, end_status='ERROR')
Example #5
    def test_create_server_with_pci_dev_and_numa(self):
        """Validate behavior of 'preferred' PCI NUMA policy.

        This test ensures that it *is* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if* PCI
        NUMA policies are in use.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        self.start_compute(pci_info=pci_info)

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
        }
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id)

        # now boot one with a PCI device, which should succeed thanks to the
        # use of the PCI policy
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id,
                            expected_state=self.expected_state)
Example #6
    def test_create_server_with_pci_dev_and_numa(self):
        """Validate behavior of 'preferred' PCI NUMA policy.

        This test ensures that it *is* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if* PCI
        NUMA policies are in use.
        """

        host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                         cpu_sockets=1,
                                         cpu_cores=2,
                                         cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
        }
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)

        self._run_build_test(flavor_id)

        # now boot one with a PCI device, which should succeed thanks to the
        # use of the PCI policy
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._run_build_test(flavor_id)
Example #7
    def test_create_servers_with_specific_type(self):
        # Regenerate the PCI addresses so both pGPUs now support nvidia-12
        connection = self.computes[
            self.compute1.host].driver._host.get_connection()
        connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
            num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
            multiple_gpu_types=True)
        # Restart the compute service to update the Resource Providers
        self.compute1 = self.restart_compute_service(self.compute1)
        pgpu1_rp_uuid = self._get_provider_uuid_by_name(
            self.compute1.host + '_' + fakelibvirt.PGPU1_PCI_ADDR)
        pgpu2_rp_uuid = self._get_provider_uuid_by_name(
            self.compute1.host + '_' + fakelibvirt.PGPU2_PCI_ADDR)

        pgpu1_inventory = self._get_provider_inventory(pgpu1_rp_uuid)
        self.assertEqual(16, pgpu1_inventory[orc.VGPU]['total'])
        pgpu2_inventory = self._get_provider_inventory(pgpu2_rp_uuid)
        self.assertEqual(8, pgpu2_inventory[orc.VGPU]['total'])

        # Attach traits to the pGPU RPs
        self._set_provider_traits(pgpu1_rp_uuid, ['CUSTOM_NVIDIA_11'])
        self._set_provider_traits(pgpu2_rp_uuid, ['CUSTOM_NVIDIA_12'])

        expected = {'CUSTOM_NVIDIA_11': fakelibvirt.PGPU1_PCI_ADDR,
                    'CUSTOM_NVIDIA_12': fakelibvirt.PGPU2_PCI_ADDR}

        for trait in expected.keys():
            # Add a trait to the flavor
            extra_spec = {"resources:VGPU": "1",
                          "trait:%s" % trait: "required"}
            flavor = self._create_flavor(extra_spec=extra_spec)

            # Use the new flavor for booting
            server = self._create_server(
                image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
                flavor_id=flavor, host=self.compute1.host,
                expected_state='ACTIVE')

            # Get the instance we just created
            inst = objects.Instance.get_by_uuid(self.context, server['id'])
            # Get the mdevs that were allocated for this instance; we should
            # only have one
            mdevs = self.compute1.driver._get_all_assigned_mediated_devices(
                inst)
            self.assertEqual(1, len(mdevs))

            # It's a dict of mdev_uuid/instance_uuid pairs; we only care about
            # the keys
            mdevs = list(mdevs.keys())
            # Now get the detailed information about this single mdev
            mdev_info = self.compute1.driver._get_mediated_device_information(
                libvirt_utils.mdev_uuid2name(mdevs[0]))

            # We can be deterministic: since we asked for a specific type,
            # we know which pGPU we landed on.
            self.assertEqual(expected[trait], mdev_info['parent'])
Example #8
    def test_create_server_with_PF(self):
        """Create a server with an SR-IOV PF-type PCI device."""

        pci_info = fakelibvirt.HostPCIDevicesInfo()
        self.start_compute(pci_info=pci_info)

        # create a server
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, networks='none')

        # ensure the filter was called
        self.assertTrue(self.mock_filter.called)
Example #9
    def test_create_server_with_PF(self):

        host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo()
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._run_build_test(flavor_id)
Example #10
    def test_get_server_diagnostics_server_with_VF(self):

        host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                         cpu_sockets=1,
                                         cpu_cores=2,
                                         cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo()
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        if not self.compute_started:
            self.compute = self.start_service('compute', host='test_compute0')
            self.compute_started = True

        # Create server
        good_server = self._build_server(
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)
        good_server['networks'] = [
            {
                'uuid': base.LibvirtNeutronFixture.network_1['id']
            },
            {
                'uuid': base.LibvirtNeutronFixture.network_4['id']
            },
        ]

        post = {'server': good_server}
        created_server = self.api.post_server(post)
        self._wait_for_state_change(created_server, 'ACTIVE')

        diagnostics = self.api.get_server_diagnostics(created_server['id'])

        self.assertEqual(
            base.LibvirtNeutronFixture.network_1_port_2['mac_address'],
            diagnostics['nic_details'][0]['mac_address'])

        self.assertEqual(
            base.LibvirtNeutronFixture.network_4_port_1['mac_address'],
            diagnostics['nic_details'][1]['mac_address'])

        self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])

        self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
Example #11
    def _test_detach_attach(self, first_port_id, second_port_id):
        # This test takes two ports that require a PCI claim.
        # Starts a compute with one PF and one connected VF.
        # Then starts a VM with the first port, detaches it, then re-attaches
        # it. These operations are expected to succeed. Then tries to attach
        # the second port and asserts that it fails as there is no free PCI
        # device left on the host.
        host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                         cpu_sockets=1,
                                         cpu_cores=2,
                                         cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        self.compute = self.start_service('compute', host='test_compute0')

        # Create server with a port
        server = self._create_server(networks=[{'port': first_port_id}])

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertEqual('test_compute0', updated_port['binding:host_id'])
        self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))

        self._detach_port(server['id'], first_port_id)

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertIsNone(updated_port['binding:host_id'])
        self.assertNotIn(first_port_id,
                         self._get_attached_port_ids(server['id']))

        # Attach back the port
        self._attach_port(server['id'], first_port_id)

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertEqual('test_compute0', updated_port['binding:host_id'])
        self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))

        # Try to attach the second port but no free PCI device left
        ex = self.assertRaises(client.OpenStackApiException, self._attach_port,
                               server['id'], second_port_id)

        self.assertEqual(400, ex.response.status_code)
        self.assertIn('Failed to claim PCI device', str(ex))
        attached_ports = self._get_attached_port_ids(server['id'])
        self.assertIn(first_port_id, attached_ports)
        self.assertNotIn(second_port_id, attached_ports)
Example #12
    def test_create_server_with_pci_dev_and_numa(self):
        """Verifies that an instance can be booted with cpu pinning and with an
           assigned pci device.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
        self.start_compute(pci_info=pci_info)

        # create a flavor
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._create_server(flavor_id=flavor_id, networks='none')
Example #13
    def test_create_server_with_VF_no_PF(self, img_mock):

        host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                             cpu_cores=2, cpu_threads=2,
                                             kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=4)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # Create a flavor
        extra_spec_pfs = {"pci_passthrough:alias": "%s:1" %
                          self.PFS_ALIAS_NAME}
        extra_spec_vfs = {"pci_passthrough:alias": "%s:1" %
                          self.VFS_ALIAS_NAME}
        flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)

        self._run_build_test(flavor_id_vfs)
        self._run_build_test(flavor_id_pfs, end_status='ERROR')
Example #14
    def test_create_server_with_pci_dev_and_numa(self):
        """Verifies that an instance can be booted with cpu pinning and with an
           assigned pci device.
        """

        host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # create a flavor
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._run_build_test(flavor_id)
Example #15
    def test_create_server_with_VF_no_PF(self):
        """Create a server with a VF and ensure the PF is then reserved."""

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=4)
        self.start_compute(pci_info=pci_info)

        # create a server using the VF
        extra_spec_vfs = {'pci_passthrough:alias': f'{self.VFS_ALIAS_NAME}:1'}
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
        self._create_server(flavor_id=flavor_id_vfs, networks='none')

        # now attempt to build another server, this time using the PF; this
        # should fail because the parent PF is reserved while its VF is in use
        extra_spec_pfs = {'pci_passthrough:alias': f'{self.PFS_ALIAS_NAME}:1'}
        flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
        self._create_server(
            flavor_id=flavor_id_pfs,
            networks='none',
            expected_state='ERROR',
        )
Example #16
    def test_create_server_with_pci_dev_and_numa_fails(self):
        """This test ensures that it is not possible to allocated CPU and
           memory resources from one NUMA node and a PCI device from another.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        self.start_compute(pci_info=pci_info)

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {'hw:cpu_policy': 'dedicated'}
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, networks='none')

        # now boot one with a PCI device, which should fail to boot
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id,
                            networks='none',
                            expected_state='ERROR')
Example #17
 def start_compute(self, hostname):
     hostname = super().start_compute(
         pci_info=fakelibvirt.HostPCIDevicesInfo(
             num_pci=0,
             num_pfs=0,
             num_vfs=0,
             num_mdevcap=2,
         ),
         hostname=hostname,
     )
     compute = self.computes[hostname]
     rp_uuid = self.compute_rp_uuids[hostname]
     rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid)
     for rp in rp_uuids:
         inventory = self._get_provider_inventory(rp)
         if orc.VGPU in inventory:
             usage = self._get_provider_usages(rp)
             self.assertEqual(16, inventory[orc.VGPU]['total'])
             self.assertEqual(0, usage[orc.VGPU])
     # Since we haven't created any mdevs yet, we shouldn't find them
     self.assertEqual([], compute.driver._get_mediated_devices())
     return compute
Example #18
    def test_get_server_diagnostics_server_with_VF(self):
        """Ensure server disagnostics include info on VF-type PCI devices."""

        pci_info = fakelibvirt.HostPCIDevicesInfo()
        self.start_compute(pci_info=pci_info)

        # create the SR-IOV port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # create a server using the VF and multiple networks
        extra_spec = {'pci_passthrough:alias': f'{self.VFS_ALIAS_NAME}:1'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        server = self._create_server(
            flavor_id=flavor_id,
            networks=[
                {
                    'uuid': base.LibvirtNeutronFixture.network_1['id']
                },
                {
                    'port': base.LibvirtNeutronFixture.network_4_port_1['id']
                },
            ],
        )

        # now check the server diagnostics to ensure the VF-type PCI device is
        # attached
        diagnostics = self.admin_api.get_server_diagnostics(server['id'])

        self.assertEqual(
            base.LibvirtNeutronFixture.network_1_port_2['mac_address'],
            diagnostics['nic_details'][0]['mac_address'],
        )
        self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])

        self.assertEqual(
            base.LibvirtNeutronFixture.network_4_port_1['mac_address'],
            diagnostics['nic_details'][1]['mac_address'],
        )
        self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
Example #19
    def _test_policy(self, pci_numa_node, status, policy):
        host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                         cpu_sockets=1,
                                         cpu_cores=2,
                                         cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1,
                                                  numa_node=pci_numa_node)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # only allow cpus on numa node 1 to be used for pinning
        self.flags(cpu_dedicated_set='4-7', group='compute')

        # request CPU pinning to create a NUMA topology and allow the test to
        # force which NUMA node the VM has to be pinned to.
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
            'hw:pci_numa_affinity_policy': policy
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._run_build_test(flavor_id, end_status=status)
Example #20
    def _start_compute_service(self, hostname):
        self.fake_connection = self._get_connection(
            host_info=fakelibvirt.HostInfo(cpu_nodes=2, kB_mem=8192),
            # We want to create two pGPUs but no other PCI devices
            pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=0,
                                                    num_pfs=0,
                                                    num_vfs=0,
                                                    num_mdevcap=2),
            hostname=hostname)

        self.mock_conn.return_value = self.fake_connection
        compute = self.start_service('compute', host=hostname)
        rp_uuid = self._get_provider_uuid_by_name(hostname)
        rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid)
        for rp in rp_uuids:
            inventory = self._get_provider_inventory(rp)
            if orc.VGPU in inventory:
                usage = self._get_provider_usages(rp)
                self.assertEqual(16, inventory[orc.VGPU]['total'])
                self.assertEqual(0, usage[orc.VGPU])
        # Since we haven't created any mdevs yet, we shouldn't find them
        self.assertEqual([], compute.driver._get_mediated_devices())
        return compute
Example #21
    def _test_policy(self, pci_numa_node, status, policy):
        # only allow cpus on numa node 1 to be used for pinning
        self.flags(cpu_dedicated_set='4-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1,
                                                  numa_node=pci_numa_node)
        self.start_compute(pci_info=pci_info)

        # request CPU pinning to create a NUMA topology and allow the test to
        # force which NUMA node the VM has to be pinned to.
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
            'hw:pci_numa_affinity_policy': policy
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, expected_state=status)

        if status == 'ACTIVE':
            self.assertTrue(self.mock_filter.called)
        else:
            # the PciPassthroughFilter should not have been called, since the
            # NUMATopologyFilter should have eliminated the host first
            self.assertFalse(self.mock_filter.called)
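For context, `hw:pci_numa_affinity_policy` accepts the values 'required', 'legacy' (the default) and 'preferred'. Based on the examples above, a hedged sketch of how each value is expected to behave when the only PCI device sits on a different NUMA node than the pinned CPUs (an assumption drawn from these tests, not a definitive statement of scheduler behaviour):

# Expected server state per policy when the PCI device and the pinned CPUs
# are on different NUMA nodes (the fake devices do report NUMA information).
POLICY_EXPECTATIONS = {
    'required': 'ERROR',    # strict affinity -> no valid host
    'legacy': 'ERROR',      # affinity enforced because the device reports NUMA info
    'preferred': 'ACTIVE',  # best-effort affinity, cross-node allocation allowed
}

def expected_state(policy):
    return POLICY_EXPECTATIONS[policy]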
Example #22
    def test_pci_devices_generation(self):
        def _cmp_pci_dev_addr(dev_xml, cmp_addr):
            cfgdev = vconfig.LibvirtConfigNodeDevice()
            cfgdev.parse_str(dev_xml)

            address = "%04x:%02x:%02x.%1x" % (
                cfgdev.pci_capability.domain, cfgdev.pci_capability.bus,
                cfgdev.pci_capability.slot, cfgdev.pci_capability.function)
            self.assertEqual(cmp_addr, address)

        pf_xml = """<device>
  <name>pci_0000_81_00_0</name>
  <path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0</path>
  <parent>pci_0000_80_01_0</parent>
  <driver>
    <name>ixgbe</name>
  </driver>
  <capability type='pci'>
    <domain>0</domain>
    <bus>129</bus>
    <slot>0</slot>
    <function>0</function>
    <product id='0x1528'>Ethernet Controller 10-Gigabit X540-AT2</product>
    <vendor id='0x8086'>Intel Corporation</vendor>
    <capability type='virt_functions'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x1'/>
    </capability>
    <iommuGroup number='40'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x0'/>
    </iommuGroup>
    <numa node='0'/>
    <pci-express>
      <link validity='cap' port='0' speed='5' width='8'/>
      <link validity='sta' speed='5' width='8'/>
    </pci-express>
  </capability>
</device>"""
        vf_xml = """<device>
  <name>pci_0000_81_00_1</name>
  <path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:00.1</path>
  <parent>pci_0000_80_01_0</parent>
  <driver>
    <name>ixgbevf</name>
  </driver>
  <capability type='pci'>
    <domain>0</domain>
    <bus>129</bus>
    <slot>0</slot>
    <function>1</function>
    <product id='0x1515'>X540 Ethernet Controller Virtual Function</product>
    <vendor id='0x8086'>Intel Corporation</vendor>
    <capability type='phys_function'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x0'/>
    </capability>
    <iommuGroup number='41'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x1'/>
    </iommuGroup>
    <numa node='0'/>
    <pci-express>
      <link validity='cap' port='0' speed='5' width='8'/>
      <link validity='sta' speed='5' width='8'/>
    </pci-express>
  </capability>
</device>"""

        # create fake pci devices
        pci_info = libvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)

        # generate xml for the created pci devices
        gen_pf = pci_info.get_device_by_name('pci_0000_81_00_0')
        gen_vf = pci_info.get_device_by_name('pci_0000_81_00_1')

        self.assertXmlEqual(gen_pf.XMLDesc(0), pf_xml)
        self.assertXmlEqual(gen_vf.XMLDesc(0), vf_xml)

        # parse the generated xml with a libvirt config class and compare
        # device address
        _cmp_pci_dev_addr(pf_xml, '0000:81:00.0')
        _cmp_pci_dev_addr(vf_xml, '0000:81:00.1')
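The `%04x:%02x:%02x.%1x` format string in `_cmp_pci_dev_addr` renders the decimal domain/bus/slot/function values parsed from the capability as a canonical PCI address. A quick standalone check of that formatting (bus 129 decimal is 0x81):

assert '%04x:%02x:%02x.%1x' % (0, 129, 0, 0) == '0000:81:00.0'  # the PF above
assert '%04x:%02x:%02x.%1x' % (0, 129, 0, 1) == '0000:81:00.1'  # the VF above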
Example #23
    def test_live_migrate_server_with_neutron(self):
        """Live migrate an instance using a neutron-provisioned SR-IOV VIF.

        This should succeed since we support this, via detach and attach of the
        PCI device.
        """

        # start two compute services with differing PCI device inventory
        self.start_compute(hostname='test_compute0',
                           pci_info=fakelibvirt.HostPCIDevicesInfo(
                               num_pfs=2, num_vfs=8, numa_node=0))
        self.start_compute(hostname='test_compute1',
                           pci_info=fakelibvirt.HostPCIDevicesInfo(
                               num_pfs=1, num_vfs=2, numa_node=1))

        # create the port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # create a server using the VF via neutron
        extra_spec = {'hw:cpu_policy': 'dedicated'}
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
        server = self._create_server(
            flavor_id=flavor_id,
            networks=[
                {
                    'port': base.LibvirtNeutronFixture.network_4_port_1['id']
                },
            ],
            host='test_compute0',
        )

        # our source host should have marked two PCI devices as used, the VF
        # and the parent PF, while the future destination is currently unused
        self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
        self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
        self.assertPCIDeviceCounts('test_compute1', total=3, free=3)

        # the instance should be on host NUMA node 0, since that's where our
        # PCI devices are
        host_numa = objects.NUMATopology.obj_from_db_obj(
            objects.ComputeNode.get_by_nodename(
                self.ctxt,
                'test_compute0',
            ).numa_topology)
        self.assertEqual({0, 1, 2, 3}, host_numa.cells[0].pinned_cpus)
        self.assertEqual(set(), host_numa.cells[1].pinned_cpus)

        # ensure the binding details sent to "neutron" are correct
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'], )['port']
        self.assertIn('binding:profile', port)
        self.assertEqual(
            {
                'pci_vendor_info': '8086:1515',
                # TODO(stephenfin): Stop relying on a side-effect of how nova
                # chooses from multiple PCI devices (apparently the last
                # matching one)
                'pci_slot': '0000:81:01.4',
                'physical_network': 'physnet4',
            },
            port['binding:profile'],
        )

        # now live migrate that server
        self._live_migrate(server, 'completed')

        # we should now have transitioned our usage to the destination, freeing
        # up the source in the process
        self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
        self.assertPCIDeviceCounts('test_compute1', total=3, free=1)

        # the instance should now be on host NUMA node 1, since that's where
        # our PCI devices are for this second host
        host_numa = objects.NUMATopology.obj_from_db_obj(
            objects.ComputeNode.get_by_nodename(
                self.ctxt,
                'test_compute1',
            ).numa_topology)
        self.assertEqual(set(), host_numa.cells[0].pinned_cpus)
        self.assertEqual({4, 5, 6, 7}, host_numa.cells[1].pinned_cpus)

        # ensure the binding details sent to "neutron" have been updated
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'], )['port']
        self.assertIn('binding:profile', port)
        self.assertEqual(
            {
                'pci_vendor_info': '8086:1515',
                'pci_slot': '0000:81:00.2',
                'physical_network': 'physnet4',
            },
            port['binding:profile'],
        )
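The totals asserted via `assertPCIDeviceCounts` follow from the fixture arguments, assuming every PF and VF created by `HostPCIDevicesInfo` is tracked as a separate assignable device (which is consistent with the numbers above): `num_pfs=2, num_vfs=8` gives 10 devices on test_compute0, and `num_pfs=1, num_vfs=2` gives 3 on test_compute1. A trivial sketch of that arithmetic:

def expected_pci_device_count(num_pfs, num_vfs, num_pci=0):
    # Assumption: each requested PF, VF and plain PCI device is exposed as
    # its own entry in the host's PCI device pool.
    return num_pci + num_pfs + num_vfs

assert expected_pci_device_count(num_pfs=2, num_vfs=8) == 10  # test_compute0
assert expected_pci_device_count(num_pfs=1, num_vfs=2) == 3   # test_compute1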
Example #24
    def test_cold_migrate_server_with_pci(self):

        host_devices = {}
        orig_create = nova.virt.libvirt.guest.Guest.create

        def fake_create(cls, xml, host):
            tree = etree.fromstring(xml)
            elem = tree.find('./devices/hostdev/source/address')

            hostname = host.get_hostname()
            address = (
                elem.get('bus'),
                elem.get('slot'),
                elem.get('function'),
            )
            if hostname in host_devices:
                self.assertNotIn(address, host_devices[hostname])
            else:
                host_devices[hostname] = []
            host_devices[hostname].append(address)

            return orig_create(xml, host)

        self.stub_out(
            'nova.virt.libvirt.guest.Guest.create',
            fake_create,
        )

        # start two compute services
        for hostname in ('test_compute0', 'test_compute1'):
            pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
            self.start_compute(hostname=hostname, pci_info=pci_info)

        # boot an instance with a PCI device on each host
        extra_spec = {
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        server_a = self._create_server(flavor_id=flavor_id,
                                       networks='none',
                                       host='test_compute0')
        server_b = self._create_server(flavor_id=flavor_id,
                                       networks='none',
                                       host='test_compute1')

        # the instances should have landed on separate hosts; ensure both hosts
        # have one used PCI device and one free PCI device
        self.assertNotEqual(
            server_a['OS-EXT-SRV-ATTR:host'],
            server_b['OS-EXT-SRV-ATTR:host'],
        )
        for hostname in ('test_compute0', 'test_compute1'):
            self.assertPCIDeviceCounts(hostname, total=2, free=1)

        # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
        # probably be less...dumb
        with mock.patch(
                'nova.virt.libvirt.driver.LibvirtDriver'
                '.migrate_disk_and_power_off',
                return_value='{}',
        ):
            # TODO(stephenfin): Use a helper
            self.api.post_server_action(server_a['id'], {'migrate': None})
            server_a = self._wait_for_state_change(server_a, 'VERIFY_RESIZE')

        # the instances should now be on the same host; ensure the source host
        # (test_compute0, where server_a was initially) still has one used PCI
        # device while the destination now has two used
        self.assertEqual(
            server_a['OS-EXT-SRV-ATTR:host'],
            server_b['OS-EXT-SRV-ATTR:host'],
        )
        self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
        self.assertPCIDeviceCounts('test_compute1', total=2, free=0)

        # now, confirm the migration and check our counts once again
        self._confirm_resize(server_a)

        self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
        self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
Example #25
    def test_create_server_after_change_in_nonsriov_pf_to_sriov_pf(self):
        # Starts a compute with a PF not configured with SR-IOV capabilities.
        # Updates the PF with the SR-IOV capability and restarts the compute
        # service. Then starts a VM with the SR-IOV port. The VM should be in
        # ACTIVE state with the SR-IOV port attached.

        # To emulate the device type changing, we first create a
        # HostPCIDevicesInfo object with PFs and VFs. Then we make a copy
        # and remove the VFs and the virt_function capability. This is
        # done to ensure the physical function product id is the same in
        # both versions.
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
        pci_info_no_sriov = copy.deepcopy(pci_info)

        # Disable SR-IOV capabilities in the PF and delete the VFs
        self._disable_sriov_in_pf(pci_info_no_sriov)

        fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
                                               hostname='test_compute0')
        self.mock_conn.return_value = fake_connection

        self.compute = self.start_service('compute', host='test_compute0')

        ctxt = context.get_admin_context()
        pci_devices = objects.PciDeviceList.get_by_compute_node(
            ctxt,
            objects.ComputeNode.get_by_nodename(
                ctxt,
                'test_compute0',
            ).id,
        )
        self.assertEqual(1, len(pci_devices))
        self.assertEqual('type-PCI', pci_devices[0].dev_type)

        # Update connection with original pci info with sriov PFs
        fake_connection = self._get_connection(pci_info=pci_info,
                                               hostname='test_compute0')
        self.mock_conn.return_value = fake_connection

        # Restart the compute service
        self.restart_compute_service(self.compute)

        # Verify the PCI devices are now of type type-PF or type-VF
        pci_devices = objects.PciDeviceList.get_by_compute_node(
            ctxt,
            objects.ComputeNode.get_by_nodename(
                ctxt,
                'test_compute0',
            ).id,
        )
        for pci_device in pci_devices:
            self.assertIn(pci_device.dev_type, ['type-PF', 'type-VF'])

        # create the port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # create a server using the VF via neutron
        flavor_id = self._create_flavor()
        self._create_server(
            flavor_id=flavor_id,
            networks=[
                {
                    'port': base.LibvirtNeutronFixture.network_4_port_1['id']
                },
            ],
        )