def test_force_lock_with_non_mig_vms(add_host_to_zone):
    """
    Test force lock host with non-migratable VMs on it

    Prerequisites:
        - Minimum of two up hypervisors
    Test Setups:
        - Add admin role to primary tenant
        - Create cgcsauto aggregate
        - Add host_under_test to cgcsauto aggregate
        - Create flavor for vms_to_test with storage_backing supported by host_under_test
        - Create vms_to_test on host_under_test that cannot be migrated or evacuated to another host
    Test Steps:
        - Force lock target host
        - Verify force lock returns 0
        - Verify VMs cannot find a host to boot on and go into error state
        - Unlock locked target host
        - Verify VMs are active on host once it is up and available
        - Verify VMs can be pinged
    Test Teardown:
        - Remove admin role from primary tenant
        - Delete created vms
        - Remove host_under_test from cgcsauto aggregate
    """
    storage_backing, host_under_test = add_host_to_zone

    # Create flavor with storage_backing the host_under_test supports
    flavor_id = nova_helper.create_flavor(storage_backing=storage_backing)[1]

    # Boot VMs on the host using the above flavor.
    LOG.tc_step("Boot VM on {}".format(host_under_test))
    vm_id = vm_helper.boot_vm(vm_host=host_under_test,
                              flavor=flavor_id,
                              avail_zone='cgcsauto',
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    # Force lock host that VMs are booted on.
    LOG.tc_step("Force lock {}".format(host_under_test))
    HostsToRecover.add(host_under_test)
    lock_code, lock_output = host_helper.lock_host(host_under_test, force=True)
    assert lock_code == 0, "Failed to lock {}. Details: {}".format(
        host_under_test, lock_output)

    vm_helper.wait_for_vm_values(vm_id, fail_ok=False, **{'status': 'ERROR'})

    host_helper.unlock_host(host_under_test)

    vm_helper.wait_for_vm_values(vm_id,
                                 timeout=300,
                                 fail_ok=False,
                                 **{'status': 'ACTIVE'})
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id,
                                               timeout=VMTimeout.DHCP_RETRY)
Example 2
def test_vcpu_model_flavor_and_image(flv_model, img_model, boot_source, error,
                                     cpu_models_supported):
    """
    Test when vcpu model is set in both flavor and image
    Args:
        flv_model (str): vcpu model flavor extra spec setting
        img_model (str): vcpu model metadata in image
        boot_source (str): launch vm from image or volume
        error (str|None): whether an error is expected with given flavor/image vcpu settings
        cpu_models_supported (tuple): fixture

    Test steps:
        - Create a flavor and set vcpu model spec as specified
        - Create an image and set image metadata as specified
        - Launch a vm from image/volume using above flavor and image
        - If error is specified, check cpu model conflict error is displayed in nova show
        - Otherwise check vm is launched successfully and expected cpu model is used

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    if not error:
        if flv_model != 'Passthrough' and (flv_model
                                           not in all_cpu_models_supported):
            skip("vcpu model {} is not supported by system".format(flv_model))

    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    if error:
        assert 1 == code
        vm_helper.wait_for_vm_values(vm,
                                     10,
                                     regex=True,
                                     strict=False,
                                     status='ERROR',
                                     fail_ok=False)
        err = vm_helper.get_vm_fault_message(vm)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_CONFLICT
        assert re.search(expt_fault, err), "Incorrect fault reported. Expected: {} Actual: {}" \
            .format(expt_fault, err)
    else:
        assert 0 == code, "Boot vm failed when cpu model in flavor and image both set to: {}".format(
            flv_model)
        check_vm_cpu_model(vm_id=vm, vcpu_model=flv_model)
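
The _boot_vm_vcpu_model helper used above (and again in the test_vm_vcpu_model example later on this page) is not shown. A possible sketch is below; the exact flavor extra-spec and image-property keys for the vcpu model are assumptions, and only the general boot flow mirrors the other tests on this page.

def _boot_vm_vcpu_model(flv_model=None, img_model=None, boot_source='volume'):
    # Sketch only: spec/property keys below are assumed for illustration
    flavor_id = nova_helper.create_flavor(name='vcpu_model', cleanup='function')[1]
    if flv_model:
        nova_helper.set_flavor(flavor=flavor_id, **{'hw:cpu_model': flv_model})      # assumed spec key

    image_id = glance_helper.get_guest_image(guest_os='tis-centos-guest')            # assumed guest name
    if img_model:
        glance_helper.set_image(image_id, **{'hw_cpu_model': img_model})             # assumed property key

    source_id = image_id
    if boot_source == 'volume':
        source_id = cinder_helper.create_volume(source_id=image_id, cleanup='function')[1]

    return vm_helper.boot_vm(name='vcpu_model', flavor=flavor_id, source=boot_source,
                             source_id=source_id, fail_ok=True, cleanup='function')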
Example 3
    def test_launch_vm_shared_cpu_setting_negative(self, vcpus, cpu_policy, shared_vcpu,
                                                   check_numa_num, remove_shared_cpu):
        """
        Test boot vm cli returns error when system does not meet the shared cpu requirement(s) in given flavor

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            cpu_policy (str): 'dedicated' or 'shared' to set in flavor extra specs
            shared_vcpu (int):
            check_numa_num (int)
            remove_shared_cpu (tuple)

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, number of numa nodes, numa_node.0, shared_vcpu values to flavor extra specs
            - Attempt to boot a vm with the flavor
            - Ensure proper error is returned

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)

        """

        # if (numa_node0 == 1 or numa_nodes == 2) and check_numa_num < 2:
        #     skip('At least 2 processors are required on compute host to launch vm with numa_nodes=2 or numa_node.0=1')

        storage_backing, avail_zone = remove_shared_cpu
        LOG.tc_step("Create flavor with given numa configs")
        flavor = create_shared_flavor(vcpus=vcpus, cpu_policy=cpu_policy, storage_backing=storage_backing,
                                      shared_vcpu=shared_vcpu)

        LOG.tc_step("Attempt to launch a vm with conflig numa node requirements")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu_negative', flavor=flavor, fail_ok=True,
                                                cleanup='function', avail_zone=avail_zone)

        assert 1 == code, 'Expected boot vm cli to return an error, but vm booted anyway. Actual: {}'.format(output)
        LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
        vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
        actual_fault = vm_helper.get_vm_fault_message(vm_id)
        expt_fault = 'Shared vCPU not enabled on host cell'

        assert expt_fault in actual_fault, "Expected fault message mismatch"
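
create_shared_flavor is called here and in the positive shared-cpu test later on this page, but it is not defined in either snippet. A rough sketch inferred from its call sites follows; the shared-vcpu extra-spec key is an assumption.

def create_shared_flavor(vcpus=2, cpu_policy='dedicated', storage_backing=None,
                         shared_vcpu=None):
    # Sketch inferred from the call sites on this page; not the actual implementation
    flavor_id = nova_helper.create_flavor(name='shared_cpu', vcpus=vcpus,
                                          storage_backing=storage_backing,
                                          cleanup='function')[1]
    extra_specs = {FlavorSpec.CPU_POLICY: cpu_policy}
    if shared_vcpu is not None:
        extra_specs['hw:wrs:shared_vcpu'] = str(shared_vcpu)    # assumed extra-spec key
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)
    return flavor_id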
Example 4
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to vm with given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - Detach all the tenant interfaces
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs(
                        ) else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "Number of vnics attached is not as expected."

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, "vnics attach exceed maximum limit"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
Example 5
    def test_launch_vm_with_shared_cpu(self, vcpus, shared_vcpu, error, add_shared_cpu, origin_total_vcpus):
        """
        Test boot vm with shared vcpu set in flavor. If the flavor settings are invalid, verify boot vm cli
        returns an error; otherwise validate the shared vcpu across live and cold migration.

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            shared_vcpu (int):
            error
            add_shared_cpu
            origin_total_vcpus

        Setup:
            - Configure one compute to have shared cpus via 'system host-cpu-modify -f shared p0=1,p1=1 <hostname>'

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, number of numa nodes, numa_node.0, shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Validate the shared cpu
            - Live migrate the vm
            - Re-validate the shared cpu
            - Cold migrate the vm
            - Re-validate the shared cpu

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        LOG.tc_step("Create a flavor with given number of vcpus")

        flavor = create_shared_flavor(vcpus, storage_backing=storage_backing, shared_vcpu=shared_vcpu)

        LOG.tc_step("Boot a vm with above flavor")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=True, cleanup='function')

        if error:
            LOG.tc_step("Check vm boot fail")
            assert 1 == code, "Expect error vm. Actual result: {}".format(output)
            LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
            vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
            actual_fault = vm_helper.get_vm_fault_message(vm_id)
            expt_fault = 'shared vcpu with 0 requested dedicated vcpus is not allowed'
            assert expt_fault in actual_fault, "Expected fault message mismatch"
            return

        LOG.tc_step("Check vm booted successfully and shared cpu indicated in vm-topology")
        assert 0 == code, "Boot vm failed. Details: {}".format(output)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # live migrate
        LOG.tc_step("Live migrate vm and then ping vm from NatBox")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # cold migrate
        LOG.tc_step("Cold migrate vm and then ping vm from NatBox")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)
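
The add_shared_cpu fixture is implied by the Setup section of the docstring (one shared core per processor via 'system host-cpu-modify'). A minimal sketch is below; the host_helper call that wraps that CLI is an assumed name, and locking/unlocking the host around the change is an assumption as well.

import pytest

@pytest.fixture(scope='module')
def add_shared_cpu(request):
    # Sketch only: pick one up hypervisor and enable one shared core per processor
    host = host_helper.get_up_hypervisors()[0]                        # assumed helper name
    storage_backing = host_helper.get_host_instance_backing(host)     # assumed helper name

    host_helper.lock_host(host)
    host_helper.modify_host_cpu(host, 'shared', p0=1, p1=1)  # assumed wrapper for 'system host-cpu-modify -f shared p0=1,p1=1'
    host_helper.unlock_host(host)

    def revert():
        # Restore the default of 0 shared cpus on teardown
        host_helper.lock_host(host)
        host_helper.modify_host_cpu(host, 'shared', p0=0, p1=0)
        host_helper.unlock_host(host)
    request.addfinalizer(revert)

    max_vcpus_per_proc = None   # left unspecified in this sketch
    return storage_backing, [host], max_vcpus_per_proc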
Example 6
def test_nova_actions(guest_os, cpu_pol, actions):
    """

    Args:
        guest_os:
        cpu_pol:
        actions:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image with specified cpu
        policy
        - Perform given nova actions on vm
        - Ensure nova operation succeeded and vm still in good state (active
        and reachable from NatBox)

    """
    if guest_os == 'opensuse_12':
        if not cinder_helper.is_volumes_pool_sufficient(min_size=40):
            skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL)

    img_id = glance_helper.get_guest_image(guest_os=guest_os)

    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1,
                                          root_disk=9)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    vol_id = \
        cinder_helper.create_volume(name='vol-' + guest_os, source_id=img_id,
                                    guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm('nova_actions',
                              flavor=flavor_id,
                              source='volume',
                              source_id=vol_id,
                              cleanup='function')[1]

    LOG.tc_step("Wait for VM pingable from NATBOX")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    for action in actions:
        if action == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, "
                "then verify ping from base vm over "
                "management and data networks")
            vm_helper.set_vm_state(vm_id=vm_id,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_id,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
        else:
            LOG.tc_step("Perform following action on vm {}: {}".format(
                vm_id, action))
            vm_helper.perform_action_on_vm(vm_id, action=action)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
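
For reference, the test above is driven entirely by its parameters. One plausible parametrization is sketched below; the concrete guest/cpu-policy/action combinations are illustrative only, not taken from the original suite, and the test body is elided.

import pytest

@pytest.mark.parametrize(('guest_os', 'cpu_pol', 'actions'), [
    # illustrative combinations only
    ('tis-centos-guest', 'dedicated', ['pause', 'unpause', 'suspend', 'resume']),
    ('ubuntu_14', 'shared', ['live_migrate', 'cold_migrate']),
    ('opensuse_12', None, ['auto_recover']),
])
def test_nova_actions(guest_os, cpu_pol, actions):
    ...   # body elided; see the full test above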
Example 7
def test_vm_autorecovery(cpu_policy, flavor_auto_recovery, image_auto_recovery, disk_format,
                         container_format, expt_result):
    """
    Test auto recovery setting in vm with various auto recovery settings in flavor and image.

    Args:
        cpu_policy (str|None): cpu policy to set in flavor
        flavor_auto_recovery (str|None): None (unset) or true or false
        image_auto_recovery (str|None): None (unset) or true or false
        disk_format (str):
        container_format (str):
        expt_result (bool): Expected vm auto recovery behavior. False: disabled, True: enabled.

    Test Steps:
        - Create a flavor with auto recovery and cpu policy set to given values in extra spec
        - Create an image with auto recovery set to given value in metadata
        - Boot a vm with the flavor and from the image
        - Set vm state to error via nova reset-state
        - Verify vm auto recovery behavior is as expected

    Teardown:
        - Delete created vm, volume, image, flavor

    """

    LOG.tc_step("Create a flavor with cpu_policy set to {} and auto_recovery set to {} in extra spec".format(
            cpu_policy, flavor_auto_recovery))
    flavor_id = nova_helper.create_flavor(name='auto_recover_'+str(flavor_auto_recovery), cleanup='function')[1]

    # Add extra specs as specified
    extra_specs = {}
    if cpu_policy is not None:
        extra_specs[FlavorSpec.CPU_POLICY] = cpu_policy
    if flavor_auto_recovery is not None:
        extra_specs[FlavorSpec.AUTO_RECOVERY] = flavor_auto_recovery

    if extra_specs:
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    property_key = ImageMetadata.AUTO_RECOVERY
    LOG.tc_step("Create an image with property auto_recovery={}, disk_format={}, container_format={}".
                format(image_auto_recovery, disk_format, container_format))
    if image_auto_recovery is None:
        image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                              cleanup='function')[1]
    else:
        image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                              cleanup='function', **{property_key: image_auto_recovery})[1]

    # auto recovery in image metadata will not work if vm booted from volume
    # LOG.tc_step("Create a volume from the image")
    # vol_id = cinder_helper.create_volume(name='auto_recov', image_id=image_id, rtn_exist=False)[1]
    # ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from image with auto recovery - {} and using the flavor with auto recovery - {}".format(
                image_auto_recovery, flavor_auto_recovery))
    vm_id = vm_helper.boot_vm(name='auto_recov', flavor=flavor_id, source='image', source_id=image_id,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Verify vm auto recovery is {} by setting vm to error state.".format(expt_result))
    vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False)
    res_bool, actual_val = vm_helper.wait_for_vm_values(vm_id=vm_id, status=VMStatus.ACTIVE, fail_ok=True,
                                                        timeout=600)

    assert expt_result == res_bool, "Expected auto_recovery: {}. Actual vm status: {}".format(
            expt_result, actual_val)

    LOG.tc_step("Ensure vm is pingable after auto recovery")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
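
The expt_result parameter above encodes the expected interaction between the flavor and image auto-recovery settings. Purely as an illustration, and assuming (not verified here) that the flavor extra spec takes precedence over the image property and that recovery defaults to enabled, the expectation could be derived as follows:

def expected_auto_recovery(flavor_setting, image_setting):
    # Assumption for illustration: flavor setting overrides image setting,
    # and auto recovery defaults to enabled when neither is set.
    setting = flavor_setting if flavor_setting is not None else image_setting
    return True if setting is None else setting.lower() == 'true'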
Example 8
    def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci,
                                                       vifs):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id,
                tenant_net_id, internal_net_id, seg_id
            vifs (list): list of vifs to add to same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (class)
            - Choose management net, one tenant net, and internal0-net1 to be
            used by test (class)
            - Boot a base pci-sriov vm - vm1 with above flavor and networks,
            ping it from NatBox (class)
            - Ping vm1 from itself over data, and internal networks

        Test Steps:
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with vm1,
                and ping it from NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management and data networks
            connection
            - Perform one of the following actions on vm2
                - set to error/ wait for auto recovery
                - suspend/resume
                - cold migration
                - pause/unpause
            - Update vlan interface to proper eth if pci-passthrough device
            moves to different eth
            - Verify ping from vm1 to vm2 over management and data networks
            still works
            - Repeat last 3 steps with different vm actions

        Teardown:
            - Delete created vms and flavor
        """

        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        pcipt_included = False
        internal_net_id = None
        for vif in vifs:
            if not isinstance(vif, str):
                vif = vif[0]
            if 'pci-passthrough' in vif:
                if not avail_pcipt_net:
                    skip(SkipHostIf.PCIPT_IF_UNAVAIL)
                internal_net_id = avail_pcipt_net
                pcipt_included = True
                continue
            elif 'pci-sriov' in vif:
                if not avail_sriov_net:
                    skip(SkipHostIf.SRIOV_IF_UNAVAIL)
                internal_net_id = avail_sriov_net

        assert internal_net_id, "test script error. Internal net should have " \
                                "been determined."

        nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id,
                                                nics=base_nics)
        if pcipt_included and extra_pcipt_net:
            nics.append(
                {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})

        img_id = None
        if glance_vif:
            img_id = glance_helper.create_image(name=glance_vif,
                                                hw_vif_model=glance_vif,
                                                cleanup='function')[1]

        LOG.tc_step("Boot a vm with following vifs on same internal net: "
                    "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False, image_id=img_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, "
            "and internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume']]:
            if 'auto_recover' in vm_actions:
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, "
                    "then verify ping from base vm over management and "
                    "internal networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=False, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    vm_helper.perform_action_on_vm(vm_under_test, action=action)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test)
            if pcipt_included:
                LOG.tc_step(
                    "Bring up vlan interface for pci-passthrough vm {}.".format(
                        vm_under_test))
                vm_helper.add_vlan_for_vm_pcipt_interfaces(
                    vm_id=vm_under_test, net_seg_id=pcipt_seg_ids)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and internal networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_pci,
                                       net_types=['mgmt', 'internal'])
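
_append_nics_for_net is not defined in this snippet. A rough sketch inferred from its call site and from the nic dict format used above follows; the rule for which vif models must go through image metadata instead of a per-nic property is an assumption.

def _append_nics_for_net(vifs, net_id, nics):
    # Sketch only: build one nic dict per requested vif on the given network,
    # and report any vif model that has to be applied via image metadata.
    nics = list(nics)            # don't mutate the base nics from the fixture
    glance_vif = None
    for vif in vifs:
        vif_model = vif if isinstance(vif, str) else vif[0]
        if vif_model in ('e1000', 'rtl8139'):
            # assumed: these models are set through an image property rather than per-nic
            glance_vif = vif_model
            nics.append({'net-id': net_id})
        else:
            nics.append({'net-id': net_id, 'vif-model': vif_model})
    return nics, glance_vif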
Example 9
    def test_multiports_on_same_network_vm_actions(self, vifs, base_setup):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm
                with specified (vif_model, pci_address)
            base_setup (list): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used
            by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping
            it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks
            connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify pci_address is preserved
            - Verify ping from vm1 to vm2 over management and data networks
            still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \
            base_setup

        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        for vm_actions in [['auto_recover'],
                           ['cold_migrate'],
                           ['pause', 'unpause'],
                           ['suspend', 'resume'],
                           ['hard_reboot']]:
            if vm_actions[0] == 'auto_recover':
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, then verify ping from "
                    "base vm over management and data networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=True, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    if 'migrate' in action and system_helper.is_aio_simplex():
                        continue

                    kwargs = {}
                    if action == 'hard_reboot':
                        action = 'reboot'
                        kwargs['hard'] = True
                    kwargs['action'] = action

                    vm_helper.perform_action_on_vm(vm_under_test, **kwargs)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

            # LOG.tc_step("Verify vm pci address preserved after {}".format(
            # vm_actions))
            # check_helper.check_vm_pci_addr(vm_under_test, nics)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and data networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                                       net_types=['mgmt', 'data'])
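
_boot_multiports_vm is not shown here. A condensed sketch based on the Setups described in the docstring follows; the nic-building details are assumptions for illustration.

def _boot_multiports_vm(flavor, mgmt_net_id, vifs, net_id, net_type, base_vm):
    # Sketch only: boot a vm with a mgmt nic plus multiple nics on the same
    # tenant/internal network, then verify basic connectivity.
    nics = [{'net-id': mgmt_net_id}]
    for vif in vifs:
        vif_model = vif if isinstance(vif, str) else vif[0]
        nics.append({'net-id': net_id, 'vif-model': vif_model})

    vm_under_test = vm_helper.boot_vm(name='multiports', nics=nics, flavor=flavor,
                                      cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

    # Ping the vm's own ips on the given net, then verify mgmt/data connectivity from base vm
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                               net_types=[net_type])
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                               net_types=['mgmt', net_type])
    return vm_under_test, nics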
Example 10
def test_vm_vcpu_model(vcpu_model, vcpu_source, boot_source,
                       cpu_models_supported):
    """
    Test vcpu model specified in flavor will be applied to vm. In case host does not support specified vcpu model,
    proper error message should be displayed in nova show.

    Args:
        vcpu_model
        vcpu_source
        boot_source

    Test Steps:
        - Set flavor extra spec or image metadata with given vcpu model.
        - Boot a vm from volume/image
        - Stop and then start vm and ensure that it retains its cpu model
        - If vcpu model is supported by host,
            - Check vcpu model specified in flavor/image is used by vm via virsh, ps aux (and /proc/cpuinfo)
            - Live migrate vm and check vcpu model again
            - Cold migrate vm and check vcpu model again
        - If vcpu model is not supported by host, check proper error message is included if host does not
            support specified vcpu model.
    Teardown:
        - Delete created vm, volume, image, flavor

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    flv_model = vcpu_model if vcpu_source == 'flavor' else None
    img_model = vcpu_model if vcpu_source == 'image' else None
    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    is_supported = (not vcpu_model) or (vcpu_model == 'Passthrough') or (
        vcpu_model in all_cpu_models_supported)
    if not is_supported:
        LOG.tc_step(
            "Check vm in error state due to vcpu model unsupported by hosts.")
        assert 1 == code, "boot vm cli exit code is not 1. Actual fail reason: {}".format(
            msg)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_UNAVAIL
        res_bool, vals = vm_helper.wait_for_vm_values(vm,
                                                      10,
                                                      regex=True,
                                                      strict=False,
                                                      status='ERROR')
        err = vm_helper.get_vm_fault_message(vm)

        assert res_bool, "VM did not reach expected error state. Actual: {}".format(
            vals)
        assert re.search(expt_fault, err), "Incorrect fault reported. Expected: {} Actual: {}" \
            .format(expt_fault, err)
        return

    # System supports specified vcpu, continue to verify
    expt_arch = None
    if vcpu_model == 'Passthrough':
        host = vm_helper.get_vm_host(vm)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Check vm is launched with expected vcpu model")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    multi_hosts_supported = (not vcpu_model) or (vcpu_model in cpu_models_multi_host) or \
                            (vcpu_model == 'Passthrough' and cpu_models_multi_host)
    # TC5141
    LOG.tc_step(
        "Stop and then restart vm and check if it retains its vcpu model")
    vm_helper.stop_vms(vm)
    vm_helper.start_vms(vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    if not multi_hosts_supported:
        LOG.info(
            "Skip migration steps. Less than two hosts in same storage aggregate support {}"
            .format(vcpu_model))
        return

    LOG.tc_step(
        "Live (block) migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.live_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)

    LOG.tc_step("Cold migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.cold_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)
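
check_vm_cpu_model is used throughout the vcpu-model tests on this page but not defined in them. A simplified sketch covering only the in-guest /proc/cpuinfo check is below; per the docstring the real check also inspects virsh and ps aux on the host, which is omitted here, and the SSH helper API is assumed.

def check_vm_cpu_model(vm_id, vcpu_model, expt_arch=None):
    # Sketch only: verify the expected cpu model is visible inside the guest
    if not vcpu_model:
        return   # default-model verification omitted in this sketch
    # For Passthrough, the guest should report the host's cpu model (expt_arch)
    expt_model = expt_arch if vcpu_model == 'Passthrough' else vcpu_model
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:                 # assumed helper
        output = vm_ssh.exec_cmd('grep "model name" /proc/cpuinfo')[1]     # assumed SSH client API
    assert expt_model in output, \
        "Expected cpu model {} not found in guest cpuinfo: {}".format(expt_model, output)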
Example 11
    def test_pci_vm_nova_actions(self, pci_numa_affinity,
                                 pci_irq_affinity_mask, pci_alias,
                                 vif_model_check, pci_dev_numa_nodes):
        """
        Test nova actions on a vm with a pci-sriov or pci-passthrough interface

        Args:

        Setups:
            - create a flavor with dedicated cpu policy (module)
            - choose one tenant network and one internal network to be used by test (module)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify ping from vm1 to vm2 over management and data networks still works
            - Verify the correct number of PCI devices are created, in correct types,
                    the numa node of the PCI devices aligns with that of CPUs, and affined CPUs for PCI devices
                    are same as specified by 'pci_alias' (if applicable)

        Teardown:
            - Delete created vms and flavor
        """
        pci_irq_affinity_mask, pci_alias = _convert_irqmask_pcialias(
            pci_irq_affinity_mask, pci_alias)
        boot_forbidden = False
        migrate_forbidden = False
        if pci_numa_affinity == 'required' and pci_alias is not None:
            host_count = pci_dev_numa_nodes
            if host_count == 0:
                boot_forbidden = True
            elif host_count == 1:
                migrate_forbidden = True
        LOG.tc_step(
            "Expected result - Disallow boot: {}; Disallow migrate: {}".format(
                boot_forbidden, migrate_forbidden))

        self.pci_numa_affinity = pci_numa_affinity
        self.pci_alias = pci_alias
        self.pci_irq_affinity_mask = pci_irq_affinity_mask

        if pci_alias is not None:
            LOG.info('Check if PCI-Alias devices existing')
            self.is_pci_device_supported(pci_alias)

        self.vif_model, self.base_vm, self.base_flavor_id, self.nics_to_test, self.seg_id, \
            self.pnet_name, self.extra_pcipt_net = vif_model_check

        LOG.tc_step(
            "Create a flavor with specified extra-specs and dedicated cpu policy"
        )
        flavor_id = self.create_flavor_for_pci()

        LOG.tc_step("Boot a vm with {} vif model on internal net".format(
            self.vif_model))
        # TODO: feature unavailable atm. Update required
        # resource_param = 'pci_vfs_used' if 'sriov' in self.vif_model else 'pci_pfs_used'
        # LOG.tc_step("Get resource usage for {} interface before booting VM(s)".format(self.vif_model))
        # pre_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)

        res, vm_id, err = vm_helper.boot_vm(name=self.vif_model,
                                            flavor=flavor_id,
                                            cleanup='function',
                                            nics=self.nics_to_test,
                                            fail_ok=boot_forbidden)
        if boot_forbidden:
            assert res > 0, "VM booted successfully while it numa node for pcipt/sriov and pci alias mismatch"
            return

        self.vm_id = vm_id

        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=self.seg_id,
                                                       init_conf=True)

        LOG.tc_step("Ping vm over mgmt and internal nets from base vm")
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.vm_id,
                                   net_types=['mgmt', 'internal'])
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        self.vm_topology = vm_helper.get_vm_values(
            vm_id=self.vm_id, fields='wrs-res:topology')[0]
        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
        self.pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id,
                                                   vnic_type=vnic_type)
        assert self.pci_nics

        self.wait_check_vm_states(step='boot')

        # TODO: feature unavailable atm. Update required
        # LOG.tc_step("Check {} usage is incremented by 1".format(resource_param))
        # post_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)
        # expt_change = 2 if self.vif_model == 'pci-passthrough' and self.extra_pcipt_net else 1
        # assert pre_resource_value + expt_change == post_resource_value, "{} usage is not incremented by {} as " \
        #                                                                 "expected".format(resource_param, expt_change)

        LOG.tc_step('Pause/Unpause {} vm'.format(self.vif_model))
        vm_helper.pause_vm(self.vm_id)
        vm_helper.unpause_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after pause/unpause"
        )
        self.wait_check_vm_states(step='pause/unpause')

        LOG.tc_step('Suspend/Resume {} vm'.format(self.vif_model))
        vm_helper.suspend_vm(self.vm_id)
        vm_helper.resume_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after suspend/resume"
        )
        self.wait_check_vm_states(step='suspend/resume')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Cold migrate {} vm'.format(self.vif_model))
        code, msg = vm_helper.cold_migrate_vm(self.vm_id,
                                              fail_ok=migrate_forbidden)
        if migrate_forbidden:
            assert code > 0, "Expect migrate fail due to no other host has pcipt/sriov and pci-alias on same numa. " \
                             "Actual: {}".format(msg)
        self.wait_check_vm_states(step='cold-migrate')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after cold migration"
        )
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Set vm to error and wait for it to be auto recovered')
        vm_helper.set_vm_state(vm_id=self.vm_id,
                               error_state=True,
                               fail_ok=False)
        vm_helper.wait_for_vm_values(vm_id=self.vm_id,
                                     status=VMStatus.ACTIVE,
                                     fail_ok=False,
                                     timeout=600)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after auto recovery"
        )
        self.wait_check_vm_states(step='set-error-state-recover')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step("Hard reboot {} vm".format(self.vif_model))
        vm_helper.reboot_vm(self.vm_id, hard=True)
        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after nova reboot hard"
        )
        self.wait_check_vm_states(step='hard-reboot')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and resize vm to new flavor"
        )
        resize_flavor = nova_helper.create_flavor(name='dedicated',
                                                  ram=2048,
                                                  cleanup='function')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=resize_flavor, **extra_specs)
        vm_helper.resize_vm(self.vm_id, resize_flavor)

        LOG.tc_step("Check vm still reachable after resize")
        self.wait_check_vm_states(step='resize')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])
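
wait_check_vm_states is called after every action above but its body is not included on this page. A minimal sketch of what it could cover is below; the real method also validates numa affinity and the CPUs affined to the pci devices per pci_alias, which is omitted here.

    def wait_check_vm_states(self, step='boot'):
        # Sketch only: confirm the vm is ACTIVE, reachable from NatBox,
        # and still has its pci nic after the given step.
        LOG.info("Check vm states after {}".format(step))
        vm_helper.wait_for_vm_values(vm_id=self.vm_id, status=VMStatus.ACTIVE,
                                     fail_ok=False, timeout=300)
        vm_helper.wait_for_vm_pingable_from_natbox(self.vm_id)

        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
        pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id, vnic_type=vnic_type)
        assert pci_nics, "No {} nic found on vm after {}".format(vnic_type, step)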