Example #1
0
    def setUp(self):
        """Configure two mdev-capable devices exposing different mdev types.

        host1 gets one mlx5_core-only device (reported under the custom
        resource class CUSTOM_NOTVGPU) and one nvidia-12-only device.
        """
        super(DifferentMdevClassesTests, self).setUp()
        self.extra_spec = {"resources:CUSTOM_NOTVGPU": "1"}
        self.flavor = self._create_flavor(extra_spec=self.extra_spec)

        enabled_types = [
            fakelibvirt.MLX5_CORE_TYPE,
            fakelibvirt.NVIDIA_12_VGPU_TYPE,
        ]
        self.flags(enabled_mdev_types=enabled_types, group='devices')
        # Re-register the dynamic options so the per-type config groups
        # derived from the updated 'device_addresses' values are created.
        nova.conf.devices.register_dynamic_opts(CONF)
        # host1 will have 2 physical devices:
        #  - 0000:81:00.0 will only support mlx5_core
        #  - 0000:81:01.0 will only support nvidia-12
        dev1_addr = self.libvirt2pci_address(fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
        dev2_addr = self.libvirt2pci_address(fakelibvirt.MDEVCAP_DEV2_PCI_ADDR)
        self.flags(device_addresses=[dev1_addr], group='mdev_mlx5_core')
        self.flags(device_addresses=[dev2_addr], group='mdev_nvidia-12')
        self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core')

        self.compute1 = self.start_compute('host1')
        # Regenerate the PCI addresses so they can support both mlx5 and
        # nvidia-12 types
        conn = self.computes[self.compute1.host].driver._host.get_connection()
        conn.pci_info = fakelibvirt.HostPCIDevicesInfo(
            num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
            generic_types=True)
        # Restart so the Resource Providers are rebuilt from the new devices.
        self.compute1 = self.restart_compute_service(self.compute1)
Example #2
0
 def start_compute(self, hostname):
     """Start a compute with two mdev-capable devices and sanity-check
     that every VGPU provider starts with 16 total and zero usage.
     """
     hostname = super().start_compute(
         pci_info=fakelibvirt.HostPCIDevicesInfo(
             num_pci=0,
             num_pfs=0,
             num_vfs=0,
             num_mdevcap=2,
         ),
         hostname=hostname,
     )
     compute = self.computes[hostname]
     root_rp_uuid = self.compute_rp_uuids[hostname]
     for provider in self._get_all_rp_uuids_in_a_tree(root_rp_uuid):
         inventory = self._get_provider_inventory(provider)
         if orc.VGPU not in inventory:
             continue
         usage = self._get_provider_usages(provider)
         self.assertEqual(16, inventory[orc.VGPU]['total'])
         self.assertEqual(0, usage[orc.VGPU])
     # Since we haven't created any mdevs yet, we shouldn't find them
     self.assertEqual([], compute.driver._get_mediated_devices())
     return compute
Example #3
0
    def test_pci_devices_generation(self):
        def _cmp_pci_dev_addr(dev_xml, cmp_addr):
            cfgdev = vconfig.LibvirtConfigNodeDevice()
            cfgdev.parse_str(dev_xml)

            address = "%04x:%02x:%02x.%1x" % (
                cfgdev.pci_capability.domain, cfgdev.pci_capability.bus,
                cfgdev.pci_capability.slot, cfgdev.pci_capability.function)
            self.assertEqual(cmp_addr, address)

        pf_xml = """<device>
  <name>pci_0000_81_00_0</name>
  <path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0</path>
  <parent>pci_0000_80_01_0</parent>
  <driver>
    <name>ixgbe</name>
  </driver>
  <capability type='pci'>
    <domain>0</domain>
    <bus>129</bus>
    <slot>0</slot>
    <function>0</function>
    <product id='0x1528'>Ethernet Controller 10-Gigabit X540-AT2</product>
    <vendor id='0x8086'>Intel Corporation</vendor>
    <capability type='virt_functions'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x1'/>
    </capability>
    <iommuGroup number='40'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x0'/>
    </iommuGroup>
    <numa node='0'/>
    <pci-express>
      <link validity='cap' port='0' speed='5' width='8'/>
      <link validity='sta' speed='5' width='8'/>
    </pci-express>
  </capability>
</device>"""
        vf_xml = """<device>
  <name>pci_0000_81_00_1</name>
  <path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:00.1</path>
  <parent>pci_0000_81_00_0</parent>
  <driver>
    <name>ixgbevf</name>
  </driver>
  <capability type='pci'>
    <domain>0</domain>
    <bus>129</bus>
    <slot>0</slot>
    <function>1</function>
    <product id='0x1515'>X540 Ethernet Controller Virtual Function</product>
    <vendor id='0x8086'>Intel Corporation</vendor>
    <capability type='phys_function'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x0'/>
    </capability>
    <iommuGroup number='41'>
      <address domain='0x0000' bus='0x81' slot='0x0' function='0x1'/>
    </iommuGroup>
    <numa node='0'/>
    <pci-express>
      <link validity='cap' port='0' speed='5' width='8'/>
      <link validity='sta' speed='5' width='8'/>
    </pci-express>
  </capability>
</device>"""

        # create fake pci devices
        pci_info = libvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)

        # generate xml for the created pci devices
        gen_pf = pci_info.get_device_by_name('pci_0000_81_00_0')
        gen_vf = pci_info.get_device_by_name('pci_0000_81_00_1')

        self.assertXmlEqual(gen_pf.XMLDesc(0), pf_xml)
        self.assertXmlEqual(gen_vf.XMLDesc(0), vf_xml)

        # parse the generated xml with a libvirt config class and compare
        # device address
        _cmp_pci_dev_addr(pf_xml, '0000:81:00.0')
        _cmp_pci_dev_addr(vf_xml, '0000:81:00.1')
Example #4
0
    def test_resize_servers_with_mlx5(self):
        """Resize a CUSTOM_NOTVGPU server, covering both revert and confirm.

        The mdev usage must follow the guest: source keeps it on revert,
        destination keeps it on confirm.
        """

        def _assert_usages(host1_amount, host2_amount):
            # Check both computes' mdev usage under the custom class.
            self.assert_mdev_usage(self.compute1,
                                   expected_amount=host1_amount,
                                   expected_rc='CUSTOM_NOTVGPU')
            self.assert_mdev_usage(self.compute2,
                                   expected_amount=host2_amount,
                                   expected_rc='CUSTOM_NOTVGPU')

        # Add another compute for the sake of resizing
        self.compute2 = self.start_compute('host2')
        # Regenerate the PCI addresses so they can support both mlx5 and
        # nvidia-12 types
        conn = self.computes[self.compute2.host].driver._host.get_connection()
        conn.pci_info = fakelibvirt.HostPCIDevicesInfo(
            num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
            generic_types=True)
        # Restart so the Resource Providers reflect the new devices.
        self.compute2 = self.restart_compute_service(self.compute2)

        # Use the new flavor for booting
        server = self._create_server(
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=self.flavor,
            networks='auto',
            host=self.compute1.host)

        # Before the resize only the source holds an mdev.
        _assert_usages(1, 0)

        new_flavor = self._create_flavor(memory_mb=4096,
                                         extra_spec=self.extra_spec)
        # First, resize and then revert.
        self._resize_server(server, new_flavor)
        # While resized, each compute holds one mdev.
        _assert_usages(1, 1)

        self._revert_resize(server)
        # We're back to the original resources usage
        _assert_usages(1, 0)

        # Now resize and then confirm it.
        self._resize_server(server, new_flavor)
        _assert_usages(1, 1)

        self._confirm_resize(server)
        # The source guest disappeared, so only the destination has an mdev.
        _assert_usages(0, 1)
Example #5
0
    def test_create_servers_with_specific_type(self):
        """Booting with a required trait lands the mdev on the matching pGPU.

        Two pGPUs are exposed with different VGPU totals and each is tagged
        with a distinct custom trait; a flavor requiring a given trait must
        get its mdev allocated from the corresponding physical device.
        """
        # Regenerate the PCI addresses so both pGPUs now support nvidia-12
        connection = self.computes[
            self.compute1.host].driver._host.get_connection()
        connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
            num_pci=0,
            num_pfs=0,
            num_vfs=0,
            num_mdevcap=2,
            multiple_gpu_types=True)
        # Make a restart to update the Resource Providers
        self.compute1 = self.restart_compute_service(self.compute1)
        pgpu1_rp_uuid = self._get_provider_uuid_by_name(
            self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
        pgpu2_rp_uuid = self._get_provider_uuid_by_name(
            self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV2_PCI_ADDR)

        # The two pGPUs report different VGPU inventories (16 vs 8).
        pgpu1_inventory = self._get_provider_inventory(pgpu1_rp_uuid)
        self.assertEqual(16, pgpu1_inventory[orc.VGPU]['total'])
        pgpu2_inventory = self._get_provider_inventory(pgpu2_rp_uuid)
        self.assertEqual(8, pgpu2_inventory[orc.VGPU]['total'])

        # Attach traits to the pGPU RPs
        self._set_provider_traits(pgpu1_rp_uuid, ['CUSTOM_NVIDIA_11'])
        self._set_provider_traits(pgpu2_rp_uuid, ['CUSTOM_NVIDIA_12'])

        # Map each trait to the PCI address of the pGPU expected to serve it.
        expected = {
            'CUSTOM_NVIDIA_11': fakelibvirt.MDEVCAP_DEV1_PCI_ADDR,
            'CUSTOM_NVIDIA_12': fakelibvirt.MDEVCAP_DEV2_PCI_ADDR
        }

        # Iterate items() instead of keys() + lookup: we need both the
        # trait and the parent device address below.
        for trait, parent_addr in expected.items():
            # Add a trait to the flavor
            extra_spec = {
                "resources:VGPU": "1",
                "trait:%s" % trait: "required"
            }
            flavor = self._create_flavor(extra_spec=extra_spec)

            # Use the new flavor for booting
            server = self._create_server(
                image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
                flavor_id=flavor,
                networks='auto',
                host=self.compute1.host)

            # Get the instance we just created
            inst = objects.Instance.get_by_uuid(self.context, server['id'])
            # Get the mdevs that were allocated for this instance, we should
            # only have one
            mdevs = self.compute1.driver._get_all_assigned_mediated_devices(
                inst)
            self.assertEqual(1, len(mdevs))

            # It's a dict of mdev_uuid/instance_uuid pairs, we only care about
            # the keys
            mdevs = list(mdevs.keys())
            # Now get the detailed information about this single mdev
            mdev_info = self.compute1.driver._get_mediated_device_information(
                libvirt_utils.mdev_uuid2name(mdevs[0]))

            # We can be deterministic : since we asked for a specific type,
            # we know which pGPU we landed.
            self.assertEqual(parent_addr, mdev_info['parent'])