# Example 1
def test_vcpu_model_flavor_and_image(flv_model, img_model, boot_source, error,
                                     cpu_models_supported):
    """
    Verify vcpu model behaviour when the model is set in both flavor and image.

    Args:
        flv_model (str): vcpu model to set via flavor extra spec
        img_model (str): vcpu model to set via image metadata
        boot_source (str): launch vm from image or volume
        error (str|None): expected error for the given flavor/image combination
        cpu_models_supported (tuple): fixture providing supported cpu models

    Test steps:
        - Create a flavor and set vcpu model spec as specified
        - Create an image and set image metadata as specified
        - Launch a vm from image/volume using above flavor and image
        - On expected error, verify the cpu model conflict fault in nova show
        - Otherwise verify the vm launches and uses the expected cpu model

    """
    _, supported_models = cpu_models_supported

    if not error:
        unsupported = (flv_model != 'Passthrough'
                       and flv_model not in supported_models)
        if unsupported:
            skip("vcpu model {} is not supported by system".format(flv_model))

    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    if not error:
        assert 0 == code, "Boot vm failed when cpu model in flavor and image both set to: {}".format(
            flv_model)
        check_vm_cpu_model(vm_id=vm, vcpu_model=flv_model)
        return

    # Expected failure path: vm should land in ERROR state with a conflict fault
    assert 1 == code
    vm_helper.wait_for_vm_values(vm, 10, regex=True, strict=False,
                                 status='ERROR', fail_ok=False)
    fault = vm_helper.get_vm_fault_message(vm)
    expt_fault = VCPUSchedulerErr.CPU_MODEL_CONFLICT
    assert re.search(expt_fault, fault), \
        "Incorrect fault reported. Expected: {} Actual: {}".format(expt_fault, fault)
# Example 2
    def test_launch_vm_shared_cpu_setting_negative(self, vcpus, cpu_policy, shared_vcpu,
                                                   check_numa_num, remove_shared_cpu):
        """
        Test boot vm cli returns error when system does not meet the shared cpu requirement(s) in given flavor

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            cpu_policy (str): 'dedicated' or 'shared' to set in flavor extra specs
            shared_vcpu (int): vcpu index to mark as shared in flavor extra specs
            check_numa_num (int): number of processors on the compute host (fixture)
            remove_shared_cpu (tuple): fixture providing (storage_backing, avail_zone)

        Test Steps:
            - Create flavor with given number of vcpus
            - Add cpu_policy and shared_vcpu values to flavor extra specs
            - Attempt to boot a vm with the flavor
            - Ensure boot is rejected and vm reaches ERROR state with expected fault

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)

        """
        storage_backing, avail_zone = remove_shared_cpu
        LOG.tc_step("Create flavor with given numa configs")
        flavor = create_shared_flavor(vcpus=vcpus, cpu_policy=cpu_policy,
                                      storage_backing=storage_backing,
                                      shared_vcpu=shared_vcpu)

        # Fix: typo in log message ('conflig' -> 'conflicting')
        LOG.tc_step("Attempt to launch a vm with conflicting numa node requirements")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu_negative', flavor=flavor,
                                                fail_ok=True, cleanup='function',
                                                avail_zone=avail_zone)

        assert 1 == code, 'Expect boot vm cli return error, although vm is booted anyway. Actual: {}'.format(output)

        LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
        vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
        actual_fault = vm_helper.get_vm_fault_message(vm_id)
        expt_fault = 'Shared vCPU not enabled on host cell'

        # Include actual fault in the failure message for easier triage
        assert expt_fault in actual_fault, \
            "Expected fault message mismatch. Expected: {} Actual: {}".format(expt_fault, actual_fault)
# Example 3
    def test_launch_vm_with_shared_cpu(self, vcpus, shared_vcpu, error, add_shared_cpu, origin_total_vcpus):
        """
        Boot a vm with a shared-vcpu flavor and validate the shared cpu across migrations.

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            shared_vcpu (int): vcpu index to mark as shared in flavor extra specs
            error: truthy when the given flavor combination is expected to fail boot
            add_shared_cpu: fixture - (storage_backing, shared_cpu_hosts, max_vcpus_per_proc)
            origin_total_vcpus: total vcpus before the vm is booted (fixture)

        Setup:
            - Configure one compute to have shared cpus via 'system host-cpu-modify -f shared p0=1,p1=1 <hostname>'

        Test Steps:
            - Create flavor with given number of vcpus and shared_vcpu extra spec
            - Boot a vm with the flavor
            - On expected error, verify ERROR state with the expected fault and stop
            - Otherwise validate the shared cpu, live migrate and re-validate,
              then cold migrate and re-validate

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu

        LOG.tc_step("Create a flavor with given number of vcpus")
        flavor = create_shared_flavor(vcpus, storage_backing=storage_backing, shared_vcpu=shared_vcpu)

        LOG.tc_step("Boot a vm with above flavor")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=True, cleanup='function')

        if error:
            LOG.tc_step("Check vm boot fail")
            assert 1 == code, "Expect error vm. Actual result: {}".format(output)
            LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
            vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
            actual_fault = vm_helper.get_vm_fault_message(vm_id)
            expt_fault = 'shared vcpu with 0 requested dedicated vcpus is not allowed'
            assert expt_fault in actual_fault, "Expected fault message mismatch"
            return

        LOG.tc_step("Check vm booted successfully and shared cpu indicated in vm-topology")
        assert 0 == code, "Boot vm failed. Details: {}".format(output)

        def _ping_and_validate():
            # Shared-vcpu layout must hold after every lifecycle action
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
            check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus,
                              prev_total_vcpus=origin_total_vcpus)

        _ping_and_validate()

        migrations = (
            ("Live migrate vm and then ping vm from NatBox", vm_helper.live_migrate_vm),
            ("Cold migrate vm and then ping vm from NatBox", vm_helper.cold_migrate_vm),
        )
        for step_msg, migrate in migrations:
            LOG.tc_step(step_msg)
            migrate(vm_id)
            _ping_and_validate()
# Example 4
def test_vm_vcpu_model(vcpu_model, vcpu_source, boot_source,
                       cpu_models_supported):
    """
    Verify a vcpu model set via flavor or image is applied to the vm, and that
    an unsupported model produces the proper fault message in nova show.

    Args:
        vcpu_model: vcpu model name, or 'Passthrough'
        vcpu_source: 'flavor' or 'image' - where the vcpu model is set
        boot_source: boot the vm from volume or image
        cpu_models_supported (tuple): fixture - (models supported by multiple
            hosts, all supported models)

    Test Steps:
        - Set flavor extra spec or image metadata with given vcpu model.
        - Boot a vm from volume/image
        - If the model is unsupported, check ERROR state and fault message
        - Otherwise check the vcpu model via virsh/ps aux, then re-check after
          stop/start, live migration and cold migration (migrations skipped
          when fewer than two hosts support the model)

    Teardown:
        - Delete created vm, volume, image, flavor

    """
    multi_host_models, supported_models = cpu_models_supported
    flv_model = vcpu_model if vcpu_source == 'flavor' else None
    img_model = vcpu_model if vcpu_source == 'image' else None
    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    supported = (not vcpu_model or vcpu_model == 'Passthrough'
                 or vcpu_model in supported_models)
    if not supported:
        LOG.tc_step(
            "Check vm in error state due to vcpu model unsupported by hosts.")
        assert 1 == code, "boot vm cli exit code is not 1. Actual fail reason: {}".format(
            msg)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_UNAVAIL
        reached, vals = vm_helper.wait_for_vm_values(vm, 10, regex=True,
                                                     strict=False,
                                                     status='ERROR')
        fault = vm_helper.get_vm_fault_message(vm)

        assert reached, "VM did not reach expected error state. Actual: {}".format(
            vals)
        assert re.search(expt_fault, fault), \
            "Incorrect fault reported. Expected: {} Actual: {}".format(expt_fault, fault)
        return

    # System supports specified vcpu, continue to verify
    expt_arch = None
    if vcpu_model == 'Passthrough':
        # Passthrough exposes the hosting compute's own cpu model to the guest
        expt_arch = host_helper.get_host_cpu_model(vm_helper.get_vm_host(vm))

    LOG.tc_step("Check vm is launched with expected vcpu model")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    multi_hosts_supported = (not vcpu_model
                             or vcpu_model in multi_host_models
                             or (vcpu_model == 'Passthrough' and multi_host_models))

    # TC5141
    LOG.tc_step(
        "Stop and then restart vm and check if it retains its vcpu model")
    vm_helper.stop_vms(vm)
    vm_helper.start_vms(vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    if not multi_hosts_supported:
        LOG.info(
            "Skip migration steps. Less than two hosts in same storage aggregate support {}"
            .format(vcpu_model))
        return

    for step_msg, migrate in (
            ("Live (block) migrate vm and check {} vcpu model".format(vcpu_model),
             vm_helper.live_migrate_vm),
            ("Cold migrate vm and check {} vcpu model".format(vcpu_model),
             vm_helper.cold_migrate_vm)):
        LOG.tc_step(step_msg)
        migrate(vm_id=vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
        check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)
# Example 5
def test_server_group_boot_vms(policy, vms_num, check_system):
    """
    Test server group policy and messaging
    Test live migration with anti-affinity server group (TC6566)
    Test changing size of existing server group via CLI (TC2917)

    Args:
        policy (str): server group policy to set when creating the group
        vms_num (int): number of vms to boot
        check_system (tuple): fixture - (hosts, storage_backing, up_hypervisors)

    Test Steps:
        - Create a server group with given policy
        - Boot vm(s) with above server group
        - Verify vm(s) booted successfully and are members of the server group
        - For anti-affinity with more vms than hosts, verify the extra boots fail
        - If more than 1 hypervisor available:
            - Attempt to live/cold migrate the vms, and check they succeed/fail
              based on the server group policy

    Teardown:
        - Delete created vms, flavor, server group

    """
    hosts, storage_backing, up_hypervisors = check_system
    host_count = len(hosts)
    if host_count < 2 and policy == 'anti_affinity':
        skip(
            "Skip anti_affinity strict for system with 1 up host in storage aggregate"
        )

    flavor_id, srv_grp_id = create_flavor_and_server_group(
        storage_backing=storage_backing, policy=policy)
    vm_hosts = []
    members = []
    failed_num = 0
    # Strict anti-affinity can place at most one vm per host; extra vms must fail
    if policy == 'anti_affinity' and vms_num > host_count:
        failed_num = vms_num - host_count
        vms_num = host_count

    LOG.tc_step(
        "Boot {} vm(s) with flavor {} in server group {} and ensure they are "
        "successfully booted.".format(vms_num, flavor_id, srv_grp_id))

    for i in range(vms_num):
        vm_id = vm_helper.boot_vm(name='srv_grp',
                                  flavor=flavor_id,
                                  hint={'group': srv_grp_id},
                                  fail_ok=False,
                                  cleanup='function')[1]

        LOG.tc_step("Check vm {} is in server group {}".format(
            vm_id, srv_grp_id))
        members = nova_helper.get_server_group_info(srv_grp_id,
                                                    headers='Members')[0]
        assert vm_id in members, "VM {} is not a member of server group {}".format(
            vm_id, srv_grp_id)

        vm_hosts.append(vm_helper.get_vm_host(vm_id))

    for i in range(failed_num):
        LOG.tc_step(
            "Boot vm{} in server group {} that's expected to fail".format(
                i, srv_grp_id))
        code, vm_id, err = vm_helper.boot_vm(name='srv_grp',
                                             flavor=flavor_id,
                                             hint={'group': srv_grp_id},
                                             fail_ok=True,
                                             cleanup='function')

        # Fetch the fault (logged by the helper) before asserting rejection
        vm_helper.get_vm_fault_message(vm_id)
        assert 1 == code, "Boot vm is not rejected"

    unique_vm_hosts = list(set(vm_hosts))
    if policy in ('affinity', 'soft_affinity') or host_count == 1:
        assert 1 == len(unique_vm_hosts), \
            "VMs in affinity server group are not on the same host"
    else:
        assert len(unique_vm_hosts) == min(vms_num, host_count), \
            "Improper VM hosts for anti-affinity policy"

    assert len(members) == vms_num, \
        "Server group member count does not match number of booted vms"

    for vm in members:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    if host_count > 1:
        # TC6566 verified here
        # affinity vms can never move; strict anti-affinity vms can only move
        # when at least one spare host exists
        expt_fail = policy == 'affinity' or (policy == 'anti_affinity'
                                             and host_count - vms_num < 1)

        for action in ('live_migrate', 'cold_migrate'):
            LOG.tc_step("Attempt to {} VMs and ensure it {}".format(
                action, 'fails' if expt_fail else 'pass'))
            vm_hosts_after_mig = []
            for vm in members:
                code, output = vm_helper.perform_action_on_vm(vm,
                                                              action=action,
                                                              fail_ok=True)
                if expt_fail:
                    assert 1 == code, "{} was not rejected. {}".format(
                        action, output)
                else:
                    assert 0 == code, "{} failed. {}".format(action, output)
                vm_host = vm_helper.get_vm_host(vm)
                vm_hosts_after_mig.append(vm_host)
                vm_helper.wait_for_vm_pingable_from_natbox(vm)

            if policy == 'affinity':
                assert len(list(set(vm_hosts_after_mig))) == 1, \
                    "VMs not on the same host after {} with affinity policy".format(action)
            elif policy == 'anti_affinity':
                # Fix: typo in assert message ('polity' -> 'policy')
                assert len(list(set(vm_hosts_after_mig))) == vms_num, \
                    "Some VMs are on same host with strict anti-affinity policy"
# Example 6
def _test_server_group_launch_vms_in_parallel(policy, min_count, max_count,
                                              check_system):
    """
    Launch vms with a server group in parallel via min_count/max_count in nova boot.

    Args:
        policy (str): affinity or anti_affinity
        min_count (int): minimum number of vms nova boot must create
        max_count (int|None): maximum number of vms nova boot may create;
            defaults to min_count when None
        check_system (tuple): test fixture

    Test Steps
        - Create a server group with the given policy
        - Create a flavor with a supported storage backing
        - Boot vms with the flavor in the server group using min/max count
        - Verify:
            - VMs status and count match the policy/host-count expectation
            - All vms are in the specified server group even if boot failed

    Teardown:
        - Delete created vms, flavor, server group

    """
    hosts, storage_backing, up_hypervisors = check_system
    host_count = len(up_hypervisors)
    if host_count == 1 and policy == 'anti_affinity':
        skip("Skip anti_affinity strict for system with 1 hypervisor")

    flavor_id, srv_grp_id = create_flavor_and_server_group(policy=policy)

    LOG.tc_step(
        "Boot vms with {} server group policy and min/max count".format(
            policy))
    code, vms, msg = vm_helper.boot_vm(name='srv_grp_parallel',
                                       flavor=flavor_id,
                                       hint={'group': srv_grp_id},
                                       fail_ok=True,
                                       min_count=min_count,
                                       max_count=max_count,
                                       cleanup='function')

    # nova treats an omitted max_count as min_count; mirror that for the checks
    if max_count is None:
        max_count = min_count

    anti_affinity = policy == 'anti_affinity'
    if anti_affinity and min_count > host_count:
        # No valid placement exists: every vm should carry the anti-affinity fault
        LOG.tc_step(
            "Check anti-affinity strict vms failed to boot when min_count > hosts_count"
        )
        assert 1 == code, msg
        expt_err = SrvGrpErr.HOST_UNAVAIL_ANTI_AFFINITY
        for vm in vms:
            fault = vm_helper.get_vm_fault_message(vm)
            assert expt_err in fault
    elif anti_affinity and max_count > host_count:
        LOG.tc_step("Check anti-affinity strict vms_count=host_count when "
                    "min_count <= hosts_count <= max_count")
        assert 0 == code, msg
        assert host_count == len(
            vms), "VMs number is not the same as qualified hosts number"
    else:
        LOG.tc_step(
            "Check vms_count=max_count when policy={} and host_count={}".
            format(policy, host_count))
        assert 0 == code, msg
        assert max_count == len(vms), "Expecting vms booted is the same as max count when " \
                                      "max count <= group size"

    # Even failed vms must still be listed as group members
    LOG.tc_step("Check vms are in server group {}: {}".format(srv_grp_id, vms))
    members = nova_helper.get_server_group_info(srv_grp_id,
                                                headers='Members')[0]
    assert set(vms) <= set(members), "Some vms are not in srv group"