Example #1
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    """
    Launch vm with vcpu model spec and cpu thread policy both set
    Args:
        vcpu_model (str): vcpu model extra spec to set in the flavor
        thread_policy (str): cpu thread policy extra spec to set in the flavor
        cpu_models_supported (tuple): fixture

    Test Steps:
        - create flavor with vcpu model and cpu thread extra specs set
        - boot vm from volume with above flavor
        - if no hyperthreaded host, check vm failed to schedule
        - otherwise check vcpu model and cpu thread policy both set as expected

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    is_supported = (vcpu_model == 'Passthrough'
                    or vcpu_model in all_cpu_models_supported)
    if not is_supported:
        skip("{} is not supported by any hypervisor".format(vcpu_model))

    name = '{}_{}'.format(vcpu_model, thread_policy)
    flv_id = nova_helper.create_flavor(name=name, vcpus=2)[1]
    ResourceCleanup.add('flavor', flv_id)
    nova_helper.set_flavor(flavor=flv_id,
                           **{
                               FlavorSpec.VCPU_MODEL: vcpu_model,
                               FlavorSpec.CPU_POLICY: 'dedicated',
                               FlavorSpec.CPU_THREAD_POLICY: thread_policy
                           })

    code, vm, msg = vm_helper.boot_vm(name=name,
                                      flavor=flv_id,
                                      fail_ok=True,
                                      cleanup='function')
    ht_hosts = host_helper.get_hypervisors_with_config(hyperthreaded=True,
                                                       up_only=True)
    if thread_policy == 'require' and not ht_hosts:
        assert 1 == code, "VM should fail to schedule when no hyperthreaded host is available"
    else:
        assert 0 == code, "VM is not launched successfully"
        check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
        vm_host = vm_helper.get_vm_host(vm)
        check_helper.check_topology_of_vm(vm_id=vm,
                                          vcpus=2,
                                          cpu_pol='dedicated',
                                          cpu_thr_pol=thread_policy,
                                          numa_num=1,
                                          vm_host=vm_host)
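
These tests are driven by pytest parametrization and fixtures. A minimal, self-contained sketch of how test_vcpu_model_and_thread_policy might be wired up; the parameter values and the fixture's stubbed return are assumptions, not taken from the source suite:

import pytest

@pytest.fixture
def cpu_models_supported():
    # Stubbed stand-in for the suite's fixture: returns
    # (models supported on multiple hosts, all supported models).
    return ('SandyBridge',), ('SandyBridge', 'Haswell')

@pytest.mark.parametrize('thread_policy', ['isolate', 'require', 'prefer'])
@pytest.mark.parametrize('vcpu_model', ['Passthrough', 'SandyBridge', 'Haswell'])
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    ...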
Example #2
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # Negative cases: flavor and image cpu policies conflict
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
        # expt_err is a string expression naming the expected error-message constant
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Expected policy: flavor extra spec overrides image metadata; default is 'shared'
    expt_cpu_pol = flv_pol or img_pol or 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
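
The expected-policy computation above encodes the precedence rule this test verifies: a flavor extra spec wins over image metadata, and with neither set the policy defaults to shared. Restated as a standalone function (plain Python, no suite helpers):

def expected_cpu_policy(flavor_policy, image_policy):
    """Flavor extra spec overrides image metadata; default is 'shared'."""
    return flavor_policy or image_policy or 'shared'

assert expected_cpu_policy('dedicated', None) == 'dedicated'
assert expected_cpu_policy(None, 'dedicated') == 'dedicated'
assert expected_cpu_policy(None, None) == 'shared'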
Example #3
def check_shared_vcpu(vm, vcpus, prev_total_vcpus, numa_node0=None, numa_nodes=None, shared_vcpu=None,
                      expt_increase=None, min_vcpus=None):
    """Verify the vm's shared vcpu is pinned to a host core with the 'Shared' function,
    then re-check the overall vm topology."""
    host, vm_numa_nodes = vm_helper.get_vm_host_and_numa_nodes(vm_id=vm)
    if shared_vcpu is not None:
        host_shared_vcpu_dict = host_helper.get_host_cpu_cores_for_function(host, func='Shared', thread=None)
        if numa_nodes is None:
            numa_nodes = 1
        if numa_node0 is None:
            numa_node0 = vm_numa_nodes[0]

        if numa_nodes == 1:
            host_shared_vcpu = host_shared_vcpu_dict[numa_node0]
        else:
            # vm spans two numa nodes; the shared core may come from either node
            host_shared_vcpu = host_shared_vcpu_dict[0] + host_shared_vcpu_dict[1]
        vm_shared_pcpu = vm_helper.get_instance_topology(vm)[0]['shared_pcpu'][0]
        assert vm_shared_pcpu in host_shared_vcpu, \
            "Shared pcpu {} is not a host 'Shared' function core".format(vm_shared_pcpu)

    if min_vcpus is None:
        min_vcpus = vcpus

    check_helper.check_topology_of_vm(vm, vcpus, prev_total_cpus=prev_total_vcpus[host], shared_vcpu=shared_vcpu,
                                      cpu_pol='dedicated', expt_increase=expt_increase,
                                      numa_num=numa_nodes, min_vcpus=min_vcpus)
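
The check assumes get_host_cpu_cores_for_function returns a mapping from numa node to the host cores assigned the 'Shared' function. A hypothetical shape, with illustrative values only:

# Hypothetical return shape -- values are illustrative, not from a real host.
host_shared_vcpu_dict = {
    0: [3],    # numa node 0: core 3 is the 'Shared' function core
    1: [19],   # numa node 1: core 19
}
# For a two-numa-node vm the candidate set is the union of both nodes:
candidates = host_shared_vcpu_dict[0] + host_shared_vcpu_dict[1]  # [3, 19]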
Example #4
def test_cpu_realtime_vm_actions(vcpus, cpu_rt, rt_mask, rt_source, shared_vcpu, numa_nodes, cpu_thread, check_hosts):
    """
    Test vm with realtime cpu policy specified in flavor
    Args:
        vcpus (int): number of vcpus in the flavor
        cpu_rt (str|None): cpu realtime flavor extra spec value
        rt_source (str): flavor or image
        rt_mask (str): realtime cpu mask, e.g. '^0'
        shared_vcpu (int|None): index of the shared vcpu, if any
        numa_nodes (int|None): number of numa_nodes to boot vm on
        cpu_thread (str|None): cpu thread policy extra spec value
        check_hosts (tuple): test fixture

    Setups:
        - check storage backing and whether system has shared cpu configured

    Test Steps:
        - Create a flavor with given cpu realtime, realtime mask and shared vcpu extra spec settings
        - Create a vm with above flavor
        - Verify cpu scheduler policies via virsh dumpxml and ps
        - Perform following nova actions and repeat above step after each action:
            ['suspend', 'resume'],
            ['live_migrate'],
            ['cold_migrate'],
            ['rebuild']

    """
    storage_backing, hosts_with_shared_cpu, ht_hosts = check_hosts

    if cpu_thread == 'require' and len(ht_hosts) < 2:
        skip("Less than two hyperthreaded hosts")

    if shared_vcpu is not None and len(hosts_with_shared_cpu) < 2:
        skip("Less than two up hypervisors configured with shared cpu")

    cpu_rt_flv = cpu_rt
    if rt_source == 'image':
        # realtime mask under test comes from image metadata; flavor gets a baseline '^0' mask
        rt_mask_flv = '^0'
        rt_mask_img = rt_mask
    else:
        rt_mask_flv = rt_mask
        rt_mask_img = None

    image_id = None
    if rt_mask_img is not None:
        image_metadata = {ImageMetadata.CPU_RT_MASK: rt_mask_img}
        image_id = glance_helper.create_image(name='rt_mask', cleanup='function', **image_metadata)[1]

    vol_id = cinder_helper.create_volume(source_id=image_id)[1]
    ResourceCleanup.add('volume', vol_id)

    name = 'rt-{}_mask-{}_{}vcpu'.format(cpu_rt, rt_mask_flv, vcpus)
    flv_id = create_rt_flavor(vcpus, cpu_pol='dedicated', cpu_rt=cpu_rt_flv, rt_mask=rt_mask_flv,
                              shared_vcpu=shared_vcpu, numa_nodes=numa_nodes, cpu_thread=cpu_thread,
                              storage_backing=storage_backing)[0]

    LOG.tc_step("Boot a vm with above flavor")
    vm_id = vm_helper.boot_vm(name=name, flavor=flv_id, cleanup='function', source='volume', source_id=vol_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    expt_rt_cpus, expt_ord_cpus = parse_rt_and_ord_cpus(vcpus=vcpus, cpu_rt=cpu_rt, cpu_rt_mask=rt_mask)

    check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu)
    vm_host = vm_helper.get_vm_host(vm_id)
    if shared_vcpu:
        assert vm_host in hosts_with_shared_cpu

    numa_num = 1 if numa_nodes is None else numa_nodes
    check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread,
                                      numa_num=numa_num, vm_host=vm_host)

    expt_current_cpu = vcpus

    for actions in [['suspend', 'resume'], ['stop', 'start'], ['live_migrate'], ['cold_migrate'], ['rebuild']]:
        LOG.tc_step("Perform {} on vm and check realtime cpu policy".format(actions))
        for action in actions:
            kwargs = {}
            if action == 'rebuild':
                kwargs = {'image_id': image_id}
            vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host_post_action = vm_helper.get_vm_host(vm_id)
        if shared_vcpu:
            assert vm_host_post_action in hosts_with_shared_cpu

        LOG.tc_step("Check cpu thread policy in vm topology and vcpus in nova show after {}".format(actions))
        check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, numa_num=numa_num,
                                          vm_host=vm_host_post_action, current_vcpus=expt_current_cpu)

        check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu,
                                               offline_cpus=None, check_virsh_vcpusched=True)
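
The realtime mask follows the '^'-exclusion convention: vcpus named in the mask are excluded from the realtime set and scheduled normally, while the rest get a realtime policy. A simplified, standalone reading of that convention (handles comma-separated '^N' and '^N-M' terms only; the suite's parse_rt_and_ord_cpus may cover more):

def split_rt_and_ordinary(vcpus, rt_mask):
    """Split vcpu ids into (realtime, ordinary) for a '^'-exclusion mask."""
    ordinary = set()
    for term in rt_mask.split(','):
        term = term.strip().lstrip('^')
        if '-' in term:
            start, end = map(int, term.split('-'))
            ordinary.update(range(start, end + 1))
        else:
            ordinary.add(int(term))
    realtime = sorted(set(range(vcpus)) - ordinary)
    return realtime, sorted(ordinary)

assert split_rt_and_ordinary(4, '^0') == ([1, 2, 3], [0])
assert split_rt_and_ordinary(6, '^0,^3-4') == ([1, 2, 5], [0, 3, 4])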
Example #5
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    """
    Boot a vm with the given cpu policy set via flavor extra spec or image
    metadata, then verify vm topology is preserved across suspend/resume,
    stop/start, live migration and cold migration.
    """
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    # thread sibling sets are only tracked for pinned (dedicated) vms
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
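
Each topology check is anchored by the per-host used_now vcpu counts captured before boot, so the expected increase on the vm's host can be verified after each action. A hypothetical shape of what get_vcpus_for_computes returns, with illustrative values:

# Hypothetical return shape -- values are illustrative only.
prev_cpus = {'compute-0': 10.0, 'compute-1': 6.5}
# A dedicated-policy vm raises used_now on its host by exactly its vcpu count;
# a shared-policy vm raises it by a fraction governed by the cpu overcommit
# ratio (the exact ratio is deployment-specific).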
Example #6
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host: one with the dedicated cpu
    policy and one with the shared cpu policy.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.
        - If using 'volume' for boot_source, create volume from tis image.
        - If using 'image' for boot_source, use tis image.
        - Determine the amount of free vcpu(s) on the compute before testing.

    Test Steps:
        - Boot the first vm with CPU_POLICY: dedicated.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Determine the amount of free vcpu(s) on the compute.
        - Boot the second vm with CPU_POLICY: shared.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Delete vms
        - Determine the amount of free vcpu(s) on the compute after testing.
        - Compare free vcpu(s) on the compute before and after testing, ensuring they are the same.

    Test Teardown:
        - Delete created volumes and flavors
    """
    LOG.tc_step("Getting host list")
    target_hosts = host_helper.get_hypervisors(state='up')
    target_host = target_hosts[0]
    storage_backing = host_helper.get_host_instance_backing(host=target_host)
    if 'image' in storage_backing:
        storage_backing = 'local_image'
    elif 'remote' in storage_backing:
        storage_backing = 'remote'

    image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)
    pre_test_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    vm_ids = []
    for x, vcpus in (('dedicated', vcpus_dedicated), ('shared', vcpus_shared)):
        LOG.tc_step("Create {} flavor with {} vcpus".format(x, vcpus))
        flavor_id = nova_helper.create_flavor(name=x, vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_id)

        if pol_source == 'flavor':
            LOG.tc_step("Set CPU_POLICY for {} flavor".format(x))
            specs = {FlavorSpec.CPU_POLICY: x}
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            LOG.tc_step("Create image with CPU_POLICY: {}".format(x))
            image_meta = {ImageMetadata.CPU_POLICY: x}
            image_id = glance_helper.create_image(name='cpu_pol_{}'.format(x), cleanup='function', **image_meta)[1]

        if boot_source == 'volume':
            LOG.tc_step("Create volume from image")
            source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(x), source_id=image_id)[1]
            ResourceCleanup.add('volume', source_id)
        else:
            source_id = image_id

        pre_boot_cpus = host_helper.get_vcpus_for_computes(field='used_now')
        LOG.tc_step("Booting cpu_pol_{}".format(x))
        vm_id = vm_helper.boot_vm(name='cpu_pol_{}'.format(x), flavor=flavor_id, source=boot_source,
                                  source_id=source_id, avail_zone='nova', vm_host=target_host, cleanup='function')[1]

        vm_ids.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, cpu_pol=x, vm_host=target_host,
                                          prev_total_cpus=pre_boot_cpus[target_host])

    LOG.tc_step("Deleting both dedicated and shared vms")
    vm_helper.delete_vms(vms=vm_ids)

    post_delete_cpus = host_helper.get_vcpus_for_computes(field='used_now')
    assert post_delete_cpus == pre_test_cpus, "vcpu count after test does not equal vcpu count before test"
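
The host's instance backing string is normalized before flavor creation; restated as a standalone helper (behavior matches the in-line logic above, with any other backing passed through unchanged):

def normalize_storage_backing(backing):
    """Map a host's instance-backing string to the flavor's storage backing."""
    if 'image' in backing:
        return 'local_image'
    if 'remote' in backing:
        return 'remote'
    return backing  # any other backing is used as-is

assert normalize_storage_backing('local_image') == 'local_image'
assert normalize_storage_backing('remote') == 'remote'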