def _boot_multiports_vm(flavor,
                        mgmt_net_id,
                        vifs,
                        net_id,
                        net_type,
                        base_vm,
                        pcipt_seg_id=None):
    nics = [{'net-id': mgmt_net_id}]

    nics, glance_vif = _append_nics_for_net(vifs, net_id=net_id, nics=nics)
    img_id = None
    if glance_vif:
        img_id = glance_helper.create_image(name=glance_vif,
                                            hw_vif_model=glance_vif,
                                            cleanup='function')[1]

    LOG.tc_step(
        "Boot a test_vm with the following nics on the same networks as "
        "base_vm: {}".format(nics))
    vm_under_test = vm_helper.boot_vm(name='multiports',
                                      nics=nics,
                                      flavor=flavor,
                                      cleanup='function',
                                      image_id=img_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

    if pcipt_seg_id:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                   net_seg_id=pcipt_seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping test_vm's own {} network ips".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=vm_under_test,
                               net_types=net_type)

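    # with multiple vifs on the same network the guest ends up with
    # overlapping subnets; configure_vm_vifs_on_same_net presumably adjusts
    # the guest interface configuration so each vif is individually
    # reachable (helper not shown in this snippet; behavior assumed)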
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

    LOG.tc_step(
        "Ping test_vm from base_vm to verify management and {} networks "
        "connection".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    return vm_under_test, nics
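

# _append_nics_for_net is called above but its definition is not included
# in this snippet. A minimal sketch of what it likely does, assuming vif
# models that can only be set via image metadata (e.g. 'e1000') are
# returned as glance_vif while the rest become per-port vif-models
# (hypothetical reconstruction, not the original helper):
def _append_nics_for_net(vifs, net_id, nics):
    nics = copy.deepcopy(nics)  # assumes 'import copy' at module level
    glance_vif = None
    for vif in vifs:
        # each vif may be a plain string or a (vif_model, ...) tuple
        vif_model = vif if isinstance(vif, str) else vif[0]
        if vif_model in ('e1000', 'rtl8139'):
            # these models can only be set via the image's hw_vif_model
            glance_vif = vif_model
            nics.append({'net-id': net_id})
        else:
            nics.append({'net-id': net_id, 'vif-model': vif_model})
    return nics, glance_vif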


def check_vm_pci_interface(vms,
                           net_type,
                           seg_id=None,
                           ping_timeout=VMTimeout.PING_VM):
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm, timeout=ping_timeout)

    LOG.tc_step(
        "Check vms' mgmt and {} interfaces are reachable from other "
        "vm".format(net_type))
    if seg_id:
        for vm_id in vms:
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=seg_id)

    # Ensure pci interfaces are working
    vm_helper.ping_vms_from_vm(vms, vms[0], net_types=['mgmt', net_type])
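
# Example usage of check_vm_pci_interface (hypothetical ids), e.g. after
# booting two pcipt vms:
#   check_vm_pci_interface(vms=[vm1_id, vm2_id], net_type='internal',
#                          seg_id=pcipt_seg_id)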


class TestMultiPortsPCI:  # enclosing test class; name assumed

    def test_multiports_on_same_network_pci_evacuate_vm(self, base_setup_pci,
                                                        vifs):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            base_setup_pci (tuple): base vm id, flavor, base nics, available
                sriov net, available pcipt net, pcipt seg ids, extra pcipt net
            vifs (list): list of vifs to add to the same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (module)
            - Choose one tenant network and one internal network to be used
              by test (module)
            - Boot a base vm - vm1 with above flavor and networks, and ping
              it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple
              ports on the same tenant network as base vm, and ping it from
              NatBox (class)
            - Ping vm2's own data network ips (class)
            - Ping vm2 from vm1 to verify management and internal networks
              connection (class)

        Test Steps:
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify ping from vm1 to vm2 over management and internal
              networks still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        internal_net_id = None
        pcipt_included = False
        nics = copy.deepcopy(base_nics)
        if 'pci-passthrough' in vifs:
            if not avail_pcipt_net:
                skip(SkipHostIf.PCIPT_IF_UNAVAIL)
            pcipt_included = True
            internal_net_id = avail_pcipt_net
            if extra_pcipt_net:
                nics.append(
                    {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})
        if 'pci-sriov' in vifs:
            if not avail_sriov_net:
                skip(SkipHostIf.SRIOV_IF_UNAVAIL)
            internal_net_id = avail_sriov_net
        assert internal_net_id, "Test script error: sriov or pcipt vif has " \
                                "to be included."

        for vif in vifs:
            nics.append({'net-id': internal_net_id, 'vif-model': vif})

        LOG.tc_step(
            "Boot a vm with the following vifs on the same network "
            "internal0-net1: {}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci_evac',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])
        vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, and "
            "internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host, vms_to_check=vm_under_test,
                               ping_vms=True)

        if pcipt_included:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device "
                "for vm {}.".format(vm_under_test))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and "
            "internal networks still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'internal'])

    def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci,
                                                       vifs):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            base_setup_pci (tuple): base vm id, flavor, base nics, available
                sriov net, available pcipt net, pcipt seg ids, extra pcipt net
            vifs (list): list of vifs to add to the same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (class)
            - Choose management net, one tenant net, and internal0-net1 to
              be used by test (class)
            - Boot a base pci-sriov vm - vm1 with above flavor and networks,
              and ping it from NatBox (class)
            - Ping vm1 from itself over data and internal networks

        Test Steps:
            - Boot a vm under test - vm2 with above flavor and with multiple
              ports on the same tenant network as vm1, and ping it from
              NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management and data networks
              connection
            - Perform one of the following actions on vm2
                - set to error / wait for auto recovery
                - suspend/resume
                - cold migration
                - pause/unpause
            - Update vlan interface to proper eth if pci-passthrough device
              moves to a different eth
            - Verify ping from vm1 to vm2 over management and data networks
              still works
            - Repeat the last 3 steps with different vm actions

        Teardown:
            - Delete created vms and flavor
        """

        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        pcipt_included = False
        internal_net_id = None
        for vif in vifs:
            if not isinstance(vif, str):
                vif = vif[0]
            if 'pci-passthrough' in vif:
                if not avail_pcipt_net:
                    skip(SkipHostIf.PCIPT_IF_UNAVAIL)
                internal_net_id = avail_pcipt_net
                pcipt_included = True
                continue
            elif 'pci-sriov' in vif:
                if not avail_sriov_net:
                    skip(SkipHostIf.SRIOV_IF_UNAVAIL)
                internal_net_id = avail_sriov_net

        assert internal_net_id, "test script error. Internal net should have " \
                                "been determined."

        nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id,
                                                nics=base_nics)
        if pcipt_included and extra_pcipt_net:
            nics.append(
                {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})

        img_id = None
        if glance_vif:
            img_id = glance_helper.create_image(name=glance_vif,
                                                hw_vif_model=glance_vif,
                                                cleanup='function')[1]

        LOG.tc_step("Boot a vm with following vifs on same internal net: "
                    "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False, image_id=img_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, "
            "and internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume']]:
            if 'auto_recover' in vm_actions:
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery to "
                    "complete, then verify ping from base vm over "
                    "management and internal networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=False, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    vm_helper.perform_action_on_vm(vm_under_test, action=action)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test)
            if pcipt_included:
                LOG.tc_step(
                    "Bring up vlan interface for pci-passthrough vm {}.".format(
                        vm_under_test))
                vm_helper.add_vlan_for_vm_pcipt_interfaces(
                    vm_id=vm_under_test, net_seg_id=pcipt_seg_ids)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and internal networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_pci,
                                       net_types=['mgmt', 'internal'])
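
    # A sketch of how the 'vifs' parameter might be supplied to the tests
    # above (the vif combinations here are illustrative assumptions, not
    # the original parametrization):
    #   @fixture(params=[('pci-sriov',), ('pci-passthrough', 'pci-sriov')])
    #   def vifs(self, request):
    #       return request.param
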
Example #5

def test_evacuate_pci_vm(vif_model_check):
    """
    Test evacuate vm with multiple ports on same network

    Args:
        vif_model_check (tuple): vif model, base vm, flavor id, nics to
            test, seg id, net type, provider net name, extra pcipt net

    Setups:
        - Create a flavor with dedicated cpu policy (module)
        - Choose one tenant network and one internal network to be used by
          test (module)
        - Boot a base vm - vm1 with above flavor and networks, and ping it
          from NatBox (module)
        - Boot a vm under test - vm2 with above flavor and with multiple
          ports on the same tenant network as base vm, and ping it from
          NatBox (class)
        - Ping vm2's own data network ips (class)
        - Ping vm2 from vm1 to verify management and data networks
          connection (class)

    Test Steps:
        - Reboot vm2 host
        - Wait for vm2 to be evacuated to other host
        - Wait for vm2 pingable from NatBox
        - Verify ping from vm1 to vm2 over management and data networks still works

    Teardown:
        - Delete created vms and flavor
    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net = vif_model_check

    LOG.tc_step("Boot a vm with {} vif model on {} net".format(
        vif_model, net_type))
    res, vm_id, err = vm_helper.boot_vm(name=vif_model,
                                        flavor=flavor_id,
                                        cleanup='function',
                                        nics=nics_to_test)
    assert 0 == res, "VM is not booted successfully. Error: {}".format(err)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping vm over mgmt and {} nets from base vm".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    host = vm_helper.get_vm_host(vm_id)

    # Remove the following ssh-to-vm sync workaround once CGTS-9279 is fixed
    LOG.tc_step("Log in to VM and run sync command")
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd('sync')

    LOG.tc_step("Reboot vm host {}".format(host))
    vm_helper.evacuate_vms(host=host,
                           vms_to_check=vm_id,
                           ping_vms=True,
                           wait_for_host_up=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step(
            "Add vlan to pci-passthrough interface for VM again after evacuation due to interface change."
        )
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id)

    LOG.tc_step(
        "Check vm still pingable over mgmt and {} nets after evacuation".
        format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    LOG.tc_step(
        "Wait for rebooted host {} to recover and ensure vm is still "
        "reachable".format(host))
    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])
Example #6

class TestPciVmNovaActions:  # enclosing test class; name assumed

    def test_pci_vm_nova_actions(self, pci_numa_affinity,
                                 pci_irq_affinity_mask, pci_alias,
                                 vif_model_check, pci_dev_numa_nodes):
        """
        Test vm actions on vm with multiple ports with given vif models on the same tenant network

        Args:
            pci_numa_affinity: pci numa affinity policy to set in flavor
            pci_irq_affinity_mask: pci irq affinity mask to set in flavor
            pci_alias: pci alias to set in flavor
            vif_model_check: fixture providing vif model, base vm, base
                flavor, nics to test, seg id, provider net name and extra
                pcipt net
            pci_dev_numa_nodes: number of hosts with suitable pci device
                numa placement, used to predict boot/migrate restrictions

        Setups:
            - Create a flavor with dedicated cpu policy (module)
            - Choose one tenant network and one internal network to be used
              by test (module)
            - Boot a base vm - vm1 with above flavor and networks, and ping
              it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple
              ports on the same tenant network as base vm, and ping it from
              NatBox (class)
            - Ping vm2's own data network ips (class)
            - Ping vm2 from vm1 to verify management and data networks
              connection (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify ping from vm1 to vm2 over management and data networks
              still works
            - Verify the correct number and types of PCI devices are created,
              the numa node of the PCI devices aligns with that of the CPUs,
              and the CPUs affined to the PCI devices are as specified by
              'pci_alias' (if applicable)

        Teardown:
            - Delete created vms and flavor
        """
        pci_irq_affinity_mask, pci_alias = _convert_irqmask_pcialias(
            pci_irq_affinity_mask, pci_alias)
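        # _convert_irqmask_pcialias is not shown in this snippet; it
        # presumably normalizes the raw test parameters into an irq
        # affinity mask and a pci alias name (assumption)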
        boot_forbidden = False
        migrate_forbidden = False
        if pci_numa_affinity == 'required' and pci_alias is not None:
            host_count = pci_dev_numa_nodes
            if host_count == 0:
                boot_forbidden = True
            elif host_count == 1:
                migrate_forbidden = True
        LOG.tc_step(
            "Expected result - Disallow boot: {}; Disallow migrate: {}".format(
                boot_forbidden, migrate_forbidden))

        self.pci_numa_affinity = pci_numa_affinity
        self.pci_alias = pci_alias
        self.pci_irq_affinity_mask = pci_irq_affinity_mask

        if pci_alias is not None:
            LOG.info('Check if PCI-Alias devices exist')
            self.is_pci_device_supported(pci_alias)

        self.vif_model, self.base_vm, self.base_flavor_id, self.nics_to_test, self.seg_id, \
            self.pnet_name, self.extra_pcipt_net = vif_model_check

        LOG.tc_step(
            "Create a flavor with specified extra-specs and dedicated cpu policy"
        )
        flavor_id = self.create_flavor_for_pci()

        LOG.tc_step("Boot a vm with {} vif model on internal net".format(
            self.vif_model))
        # TODO: feature unavailable atm. Update required
        # resource_param = 'pci_vfs_used' if 'sriov' in self.vif_model else 'pci_pfs_used'
        # LOG.tc_step("Get resource usage for {} interface before booting VM(s)".format(self.vif_model))
        # pre_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)

        res, vm_id, err = vm_helper.boot_vm(name=self.vif_model,
                                            flavor=flavor_id,
                                            cleanup='function',
                                            nics=self.nics_to_test,
                                            fail_ok=boot_forbidden)
        if boot_forbidden:
            assert res > 0, "VM booted successfully while boot should be " \
                            "rejected due to numa node mismatch between " \
                            "pcipt/sriov device and pci alias"
            return

        self.vm_id = vm_id

        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=self.seg_id,
                                                       init_conf=True)

        LOG.tc_step("Ping vm over mgmt and internal nets from base vm")
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.vm_id,
                                   net_types=['mgmt', 'internal'])
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        self.vm_topology = vm_helper.get_vm_values(
            vm_id=self.vm_id, fields='wrs-res:topology')[0]
        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
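        # neutron maps sriov VF ports to vnic_type 'direct' and
        # pci-passthrough PF ports to vnic_type 'direct-physical'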
        self.pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id,
                                                   vnic_type=vnic_type)
        assert self.pci_nics

        self.wait_check_vm_states(step='boot')
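        # wait_check_vm_states (defined on the test class, not shown here)
        # presumably re-verifies vm status, topology and pci irq affinity
        # after each operation (assumption)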

        # TODO: feature unavailable atm. Update required
        # LOG.tc_step("Check {} usage is incremented by 1".format(resource_param))
        # post_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)
        # expt_change = 2 if self.vif_model == 'pci-passthrough' and self.extra_pcipt_net else 1
        # assert pre_resource_value + expt_change == post_resource_value, "{} usage is not incremented by {} as " \
        #                                                                 "expected".format(resource_param, expt_change)

        LOG.tc_step('Pause/Unpause {} vm'.format(self.vif_model))
        vm_helper.pause_vm(self.vm_id)
        vm_helper.unpause_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after pause/unpause"
        )
        self.wait_check_vm_states(step='pause/unpause')

        LOG.tc_step('Suspend/Resume {} vm'.format(self.vif_model))
        vm_helper.suspend_vm(self.vm_id)
        vm_helper.resume_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after suspend/resume"
        )
        self.wait_check_vm_states(step='suspend/resume')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Cold migrate {} vm'.format(self.vif_model))
        code, msg = vm_helper.cold_migrate_vm(self.vm_id,
                                              fail_ok=migrate_forbidden)
        if migrate_forbidden:
            assert code > 0, "Expect migrate to fail since no other host " \
                             "has pcipt/sriov and pci-alias on the same " \
                             "numa node. Actual: {}".format(msg)
        self.wait_check_vm_states(step='cold-migrate')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after cold migration"
        )
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Set vm to error and wait for it to be auto recovered')
        vm_helper.set_vm_state(vm_id=self.vm_id,
                               error_state=True,
                               fail_ok=False)
        vm_helper.wait_for_vm_values(vm_id=self.vm_id,
                                     status=VMStatus.ACTIVE,
                                     fail_ok=False,
                                     timeout=600)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after auto recovery"
        )
        self.wait_check_vm_states(step='set-error-state-recover')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step("Hard reboot {} vm".format(self.vif_model))
        vm_helper.reboot_vm(self.vm_id, hard=True)
        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after "
            "nova hard reboot")
        self.wait_check_vm_states(step='hard-reboot')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and resize vm to new flavor"
        )
        resize_flavor = nova_helper.create_flavor(name='dedicated',
                                                  ram=2048,
                                                  cleanup='function')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=resize_flavor, **extra_specs)
        vm_helper.resize_vm(self.vm_id, resize_flavor)

        LOG.tc_step("Check vm still reachable after resize")
        self.wait_check_vm_states(step='resize')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

Example #7

def _test_pci_resource_usage(vif_model_check):
    """
    Create a vm under test with specified vifs for tenant network

    Returns (str): id of vm under test

    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net_name, extra_pcipt_net = \
        vif_model_check

    LOG.tc_step("Ensure core/vm quota is sufficient")

    if 'sriov' in vif_model:
        vm_type = 'sriov'
        resource_param = 'pci_vfs_used'
        max_resource = 'pci_vfs_configured'
    else:
        vm_type = 'pcipt'
        resource_param = 'pci_pfs_used'
        max_resource = 'pci_pfs_configured'

    LOG.tc_step(
        "Get resource usage for {} interface before booting VM(s)".format(
            vif_model))
    LOG.info("provider net for {} interface: {}".format(vif_model, pnet_name))

    assert pnet_name, "provider network for {} interface is not found".format(
        vif_model)

    total_val, pre_resource_value = nova_helper.get_pci_interface_stats_for_providernet(
        pnet_name, fields=(max_resource, resource_param))
    LOG.info("Resource Usage {} for {}. Resource configured: {}".format(
        pre_resource_value, vif_model, total_val))

    expt_change = 2 if vif_model == 'pci-passthrough' and extra_pcipt_net else 1
    vm_limit = int((total_val - pre_resource_value) /
                   expt_change) if vif_model == 'pci-passthrough' else 5
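    # each pci-passthrough vm consumes expt_change PFs (2 when an extra
    # pcipt net is attached), so (total - used) / expt_change more vms can
    # be booted; for sriov the loop is simply capped at 5 vms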
    vm_helper.ensure_vms_quotas(vm_limit + 5)
    vms_under_test = []
    for i in range(vm_limit):
        LOG.tc_step("Boot a vm with {} vif model on {} net".format(
            vif_model, net_type))
        vm_id = vm_helper.boot_vm(name=vif_model,
                                  flavor=flavor_id,
                                  cleanup='function',
                                  nics=nics_to_test)[1]
        vms_under_test.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        if vm_type == 'pcipt':
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=seg_id)

        LOG.tc_step(
            "Ping vm over mgmt and {} nets from itself".format(net_type))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm_id,
                                   net_types=['mgmt', net_type])

        LOG.tc_step(
            "Check resource usage for {} interface increased by {}".format(
                vif_model, expt_change))
        resource_value = nova_helper.get_provider_net_info(
            pnet_name, field=resource_param)
        assert pre_resource_value + expt_change == resource_value, "Resource usage for {} is not increased by {}". \
            format(vif_model, expt_change)

        pre_resource_value = resource_value

    for vm_to_del in vms_under_test:
        LOG.tc_step(
            "Check resource usage for {} interface reduced by {} after "
            "deleting a vm".format(vif_model, expt_change))
        vm_helper.delete_vms(vm_to_del, check_first=False, stop_first=False)
        resource_val = common.wait_for_val_from_func(
            expt_val=pre_resource_value - expt_change,
            timeout=30,
            check_interval=3,
            func=nova_helper.get_provider_net_info,
            providernet_id=pnet_name,
            field=resource_param)[1]

        assert pre_resource_value - expt_change == resource_val, "Resource usage for {} is not reduced by {}". \
            format(vif_model, expt_change)
        pre_resource_value = resource_val


class TestPciptRobustness:  # enclosing test class; name assumed

    def test_pcipt_robustness(self, pcipt_prep):
        """
        TC3_robustness: PCI-passthrough by locking and rebooting pci_vm host

        Args:
            pcipt_prep: test fixture to set up test environment and get proper pci nets/hosts/seg_id

        Setups:
            - select a providernet with pcipt interfaces configured
            - get pci hosts configured with same above providernet
            - get one network under above providernet (or two for CX4 nic)

        Test Steps:
            - Boot 2 pcipt vms with pci-passthrough vif over selected network
            - Verify resource usage for providernet is increased as expected
            - Lock pci_vm host and ensure vm migrated to other host (or fail to lock if no other pcipt host available)
            - (Delete above tested pcipt vm if only two pcipt hosts available)
            - Lock host of the other pcipt vm, and ensure lock succeeds
            - Verify vms' pci-pt interfaces reachable and resource usage for pnet as expected
            - 'sudo reboot -f' pci_vm host, and ensure vm evacuated or up on same host if no other pcipt host available
            - Repeat above step for another pcipt vm
            - Verify vms' pci-pt interfaces reachable and resource usage for pnet unchanged

        Teardown:
            - Delete vms, volumes, flavor created
            - Recover hosts if applicable

        """
        net_type, pci_net_name, pci_hosts, pnet_id, nics, min_vcpu_host, seg_id, vm_num, vm_vcpus, pfs_use_init = \
            pcipt_prep
        vif_model = 'pci-passthrough'

        # Create flavor with calculated vcpu number
        LOG.fixture_step(
            "Create a flavor with dedicated cpu policy and {} vcpus".format(
                vm_vcpus))
        flavor_id = nova_helper.create_flavor(
            name='dedicated_{}vcpu'.format(vm_vcpus), ram=1024,
            vcpus=vm_vcpus)[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        extra_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
        }
        # FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}    # LP1854516
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        # Boot vms with 2 pci-passthrough vifs each, and wait until pingable
        LOG.tc_step("Boot {} vms with 2 {} vifs each".format(
            vm_num, vif_model))
        vms = []
        for i in range(vm_num):
            LOG.info("Booting pci-passthrough vm{}".format(i + 1))
            vm_id = vm_helper.boot_vm(flavor=flavor_id,
                                      nics=nics,
                                      cleanup='function')[1]
            vms.append(vm_id)
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id,
                                                       seg_id,
                                                       init_conf=True)

        # TODO: feature unavailable atm. Update required
        # pfs_use_post_boot = nova_helper.get_provider_net_info(pnet_id, field='pci_pfs_used')
        # resource_change = 2 if isinstance(seg_id, dict) else 1
        # assert pfs_use_post_boot - pfs_use_init == vm_num * resource_change, "Number of PCI pfs used is not as expected"

        check_vm_pci_interface(vms=vms, net_type=net_type)
        HostsToRecover.add(pci_hosts)

        # pfs_use_pre_action = pfs_use_post_boot
        iter_count = 2 if len(pci_hosts) < 3 else 1
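        # with fewer than 3 pci hosts, the first iteration expects lock and
        # evacuation to fail (no other pcipt host to migrate to); one vm is
        # then deleted and the second iteration expects both to succeed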
        for i in range(iter_count):
            if i == 1:
                LOG.tc_step(
                    "Delete a pcipt vm, then test host lock and reboot "
                    "again expecting success")
                vm_helper.delete_vms(vms=vms[1])
                vms.pop()
                # TODO: feature unavailable atm. Update required
                # pfs_use_pre_action -= resource_change
                # common.wait_for_val_from_func(expt_val=pfs_use_pre_action, timeout=30, check_interval=3,
                #                               func=nova_helper.get_provider_net_info,
                #                               providernet_id=pnet_id, field='pci_pfs_used')

            LOG.tc_step("Test lock {} vms hosts started - iter{}".format(
                vif_model, i + 1))
            for vm in vms:
                pre_lock_host = vm_helper.get_vm_host(vm)
                assert pre_lock_host in pci_hosts, "VM is not booted on pci_host"

                LOG.tc_step("Lock host of {} vms: {}".format(
                    vif_model, pre_lock_host))
                code, output = host_helper.lock_host(host=pre_lock_host,
                                                     check_first=False,
                                                     swact=True,
                                                     fail_ok=True)
                post_lock_host = vm_helper.get_vm_host(vm)
                assert post_lock_host in pci_hosts, "VM is not on pci host after migrating"

                if len(pci_hosts) < 3 and i == 0:
                    assert 5 == code, \
                        "Expect host-lock to fail due to vm migration " \
                        "failure. Actual: {}".format(output)
                    assert pre_lock_host == post_lock_host, "VM host should not change when no other host to migrate to"
                else:
                    assert 0 == code, "Expect host-lock successful. Actual: {}".format(
                        output)
                    assert pre_lock_host != post_lock_host, "VM host did not change"
                    LOG.tc_step("Unlock {}".format(pre_lock_host))

                check_vm_pci_interface(vms, net_type=net_type)
                host_helper.unlock_host(pre_lock_host, available_only=True)
            # TODO: feature unavailable atm. Update required
            # pfs_use_post_lock = nova_helper.get_provider_net_info(pnet_id, field='pci_pfs_used')
            # assert pfs_use_pre_action == pfs_use_post_lock, "Number of PCI pfs used after host-lock is not as expected"

            LOG.tc_step("Test evacuate {} vms started - iter{}".format(
                vif_model, i + 1))
            for vm in vms:
                pre_evac_host = vm_helper.get_vm_host(vm)

                LOG.tc_step(
                    "Reboot {} and ensure {} vm is evacuated when "
                    "applicable".format(pre_evac_host, vif_model))
                code, output = vm_helper.evacuate_vms(pre_evac_host,
                                                      vm,
                                                      fail_ok=True,
                                                      wait_for_host_up=True)

                if len(pci_hosts) < 3 and i == 0:
                    assert 1 == code, \
                        "Expect vm to stay on the same host due to " \
                        "migration failure. Actual: {}".format(output)
                    vm_helper.wait_for_vm_status(vm_id=vm)
                else:
                    assert 0 == code, "Expect vm evacuated to other host. Actual: {}".format(
                        output)
                    post_evac_host = vm_helper.get_vm_host(vm)
                    assert post_evac_host in pci_hosts, "VM is not on pci host after evacuation"

                check_vm_pci_interface(vms, net_type=net_type)