Example #1
def test_something_avoid(modify_system_backing):
    """
    Test to AVOID! Do NOT parametrize a module/class-level fixture unless you are absolutely sure about the impact and
    intend to do so. Note that when a module-level fixture is parametrized, both the setups AND teardowns will be run
    multiple times.

    Args:
        modify_system_backing: module-level parametrized fixture that sets the host storage backing under test

    Setups:
        - Lock host, modify host storage backing to given backing, unlock host      (module)

    Test Steps:
        - Create a flavor with specified storage backing
        - Boot vm from above flavor

    Teardown:
        - Delete created vm, volume, flavor
        - Lock host, modify host storage backing to local_image, unlock host      (module)

    """
    LOG.tc_step("Create a flavor with specified storage backing")
    storage_backing = modify_system_backing
    flv_id = nova_helper.create_flavor(name='test_avoid_flv',
                                       storage_backing=storage_backing)[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flv_id)

    LOG.tc_step("Boot vm from above flavor")
    vm_id = vm_helper.boot_vm(name='test_avoid_vm', flavor=flv_id)[1]
    ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
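For contrast, a minimal sketch of the module-level parametrized fixture this example warns about; the fixture body and backing values are assumptions, not taken from the framework:

from pytest import fixture

@fixture(scope='module', params=['local_image', 'remote'])
def modify_system_backing(request):
    backing = request.param
    # module setup runs once PER PARAM: lock host, set storage backing, unlock host
    yield backing
    # module teardown also runs once per param: lock host, restore local_image backing, unlock host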
Example #2
def test_multi_thread():
    LOG.tc_step("Create MThreads")
    thread_1 = MThread(func, 1, 10, extra_arg="Hello")
    thread_2 = MThread(func, 2, 6, extra_arg="Second")
    # thread_3 is deliberately never waited on below; it keeps running and finishes after the test steps complete
    thread_3 = MThread(func, 3, 20, extra_arg="run for a long time")
    thread_4 = MThread(nova_helper.create_flavor,
                       'threading',
                       'auto',
                       vcpus=2,
                       ram=1024)

    LOG.tc_step("Starting threads")
    thread_1.start_thread()
    thread_2.start_thread()
    thread_3.start_thread()
    thread_4.start_thread()
    LOG.tc_step("Finished starting threads")

    LOG.tc_step("Waiting for threads to finish")
    thread_1.wait_for_thread_end()
    thread_2.wait_for_thread_end()
    thread_4.wait_for_thread_end()
    LOG.tc_step("Threads have finished")

    id_ = thread_4.get_output()[1]
    LOG.info("flav_id = {}".format(id_))
    ResourceCleanup.add(resource_type='flavor', resource_id=id_)
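The worker func is not defined in this example; a hedged sketch of a compatible signature (thread number, iteration count, keyword argument), assuming LOG is the framework logger:

import time

def func(thread_no, iterations, extra_arg=None):
    for i in range(iterations):
        LOG.info("Thread {} iteration {}: {}".format(thread_no, i, extra_arg))
        time.sleep(1)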
Example #3
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
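A hypothetical class-scoped wiring and consumer for base_setup; the class name, decorator, and test body are illustrative assumptions:

from pytest import fixture

class TestMultiPorts:

    @fixture(scope='class')
    def base_setup(self):
        ...  # body as shown above

    def test_ping_base_vm(self, base_setup):
        base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id = base_setup
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='mgmt')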
Example #4
def test_flavor_setting_numa_negative(vcpus, vswitch_affinity, numa_nodes,
                                      numa0, numa0_cpus, numa0_mem, numa1,
                                      numa1_cpus, numa1_mem, expt_err):

    LOG.tc_step("Create a 1024ram flavor with {} vcpus".format(vcpus))
    name = 'vswitch_affinity_{}_1G_{}cpu'.format(vswitch_affinity, vcpus)
    flv_id = nova_helper.create_flavor(name=name, vcpus=vcpus, ram=1024)[1]
    ResourceCleanup.add('flavor', flv_id)

    specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.VSWITCH_NUMA_AFFINITY: vswitch_affinity
    }
    tmp_dict = {
        FlavorSpec.NUMA_0: numa0,
        FlavorSpec.NUMA0_CPUS: numa0_cpus,
        FlavorSpec.NUMA0_MEM: numa0_mem,
        FlavorSpec.NUMA_1: numa1,
        FlavorSpec.NUMA1_CPUS: numa1_cpus,
        FlavorSpec.NUMA1_MEM: numa1_mem
    }

    for key, val in tmp_dict.items():
        if val is not None:
            specs[key] = val

    LOG.tc_step(
        "Attempt to set following extra spec to flavor {} and ensure it's rejected: {}"
        .format(flv_id, specs))
    code, output = nova_helper.set_flavor(flv_id, fail_ok=True, **specs)
    assert 1 == code, "Invalid extra spec is not rejected. Details: {}".format(
        output)
    assert eval(expt_err) in output, "Expected error message is not found"
Example #5
def _flavors(hosts_pci_device_info):
    """
    Creates all flavors required for this test module
    """
    # Create flavor using first device.
    pci_alias = list(hosts_pci_device_info.values())[0][0]['pci_alias']
    flavor_parms = {'flavor_qat_vf_1': [2, 1024, 2, 1],
                    'flavor_resize_qat_vf_1': [4, 2048, 2, 1],
                    'flavor_qat_vf_4': [2, 1024, 2, 4],
                    'flavor_resize_qat_vf_4': [2, 2048, 2, 4],
                    'flavor_qat_vf_32': [2, 1024, 2, 32],
                    'flavor_qat_vf_33': [2, 1024, 2, 33],
                    'flavor_none': [1, 1024, 2, 0],
                    'flavor_resize_none': [2, 2048, 2, 0],
                    'flavor_resize_qat_vf_32': [4, 2048, 2, 32],
                    }

    flavors = {}
    for k, v in flavor_parms.items():
        vf = v[3]
        LOG.fixture_step("Create a flavor with {} Coletro Creek crypto VF....".format(vf))
        flavor_id = nova_helper.create_flavor(name=k, vcpus=v[0], ram=v[1], root_disk=v[2])[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        if vf > 0:
            extra_spec = {FlavorSpec.PCI_PASSTHROUGH_ALIAS: '{}:{}'.format(pci_alias, vf),
                          # FlavorSpec.NUMA_NODES: '2',
                          # feature deprecated. May need to update test case as well.
                          FlavorSpec.CPU_POLICY: 'dedicated'}

            nova_helper.set_flavor(flavor_id, **extra_spec)
        flavors[k] = flavor_id

    return flavors
Example #6
    def create_flavors(self):
        LOG.tc_step('Create flavors')

        flavor_name_format = 'pve_flavor_{}'
        for sn in range(NUM_VM):
            name = flavor_name_format.format(sn)
            options = {
                'name': name,
                'vcpus': self.vcpus[sn],
                'ram': self.mem[sn],
                'root_disk': self.root_disk[sn],
                'is_public': True,
                'storage_backing': self.storage_backing,
            }
            if self.swap_disk:
                options['swap'] = self.swap_disk[sn]

            flavor_id = nova_helper.create_flavor(**options)[1]
            ResourceCleanup.add('flavor', flavor_id, scope='function')
            self.vms_info.update(
                {sn: {
                    'flavor_name': name,
                    'flavor_id': flavor_id
                }})

            # TODO create volume
        LOG.info('OK, flavors created:\n{}\n'.format(
            [vm['flavor_id'] for vm in self.vms_info.values()]))
Example #7
def volumes_(image_):
    """
    Test fixture to create two large cinder volumes with sizes of 20 and 40 GB.
    Args:
        image_: the guest image_id

    Returns: list of volume dict as following:
        {'id': <volume_id>,
         'display_name': <vol_inst1 or vol_inst2>,
         'size': <20 or 40>
        }
    """

    volumes = []
    cinder_params = [{
        'name': 'vol_inst1',
        'size': 20
    }, {
        'name': 'vol_inst2',
        'size': 40
    }]

    for param in cinder_params:
        volume_id = \
            cinder_helper.create_volume(name=param['name'], source_id=image_,
                                        size=param['size'])[1]
        volume = {
            'id': volume_id,
            'display_name': param['name'],
            'size': param['size']
        }
        volumes.append(volume)
        ResourceCleanup.add('volume', volume['id'], scope='function')

    return volumes
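A hypothetical consumer of the volumes_ fixture (test name and wiring assumed), booting one vm per returned volume dict:

def test_boot_from_volumes(volumes_):
    for vol in volumes_:
        vm_id = vm_helper.boot_vm(name=vol['display_name'], source='volume',
                                  source_id=vol['id'], cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)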
Example #8
def flavor_id_module():
    """
    Create a basic flavor to be used by test cases as test setup, at the beginning of the test module.
    Delete the created flavor as test teardown, at the end of the test module.
    """
    flavor = nova_helper.create_flavor()[1]
    ResourceCleanup.add('flavor', resource_id=flavor, scope='module')

    return flavor
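A hypothetical consumer (test name assumed): every test in the module reuses the same flavor, which is deleted once at module teardown:

def test_boot_with_module_flavor(flavor_id_module):
    vm_id = vm_helper.boot_vm(name='module_flv_vm', flavor=flavor_id_module,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)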
Example #9
def _create_flavor_vcpu_model(vcpu_model, root_disk_size=None):
    flv_id = nova_helper.create_flavor(name='vcpu_model_{}'.format(vcpu_model),
                                       root_disk=root_disk_size)[1]
    ResourceCleanup.add('flavor', flv_id)
    if vcpu_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: vcpu_model})

    return flv_id
Example #10
def test_lock_unlock_secure_boot_vm():
    """
    This is to test host lock with secure boot vm.

    :return:
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)

    # Lock the compute node hosting the secure boot vm
    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    host_helper.lock_host(compute_host, timeout=800)
    if not system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
    host_helper.unlock_host(compute_host, timeout=800)

    if system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
Example #11
def _create_flavor(flavor_info, storage_backing):
    root_disk = flavor_info[0]
    ephemeral = flavor_info[1]
    swap = flavor_info[2]

    flavor_id = nova_helper.create_flavor(ephemeral=ephemeral,
                                          swap=swap,
                                          root_disk=root_disk,
                                          storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id)
    return flavor_id
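An illustrative call with assumed values: 2G root disk, 1G ephemeral disk, 512M swap on local_image backing:

flavor = _create_flavor(flavor_info=[2, 1, 512], storage_backing='local_image')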
Example #12
def test_migration_auto_converge(no_simplex):
    """
    Auto converge a VM with stress-ng running

    Test Steps:
        - Create flavor
        - Create a heat stack (launch a vm with stress-ng)
        - Perform live-migration and verify connectivity

    Test Teardown:
        - Delete stacks,vm, flavors created

    """

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=3)[1]
    ResourceCleanup.add('flavor', flavor_id)

    # add live migration timeout
    extra_specs = {FlavorSpec.LIVE_MIG_TIME_OUT: 300}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Get the heat file name to use")
    heat_template = _get_stress_ng_heat()

    stack_name = vm_name = 'stress_ng'
    LOG.tc_step("Creating heat stack")
    code, msg = heat_helper.create_stack(stack_name=stack_name,
                                         template=heat_template,
                                         parameters={
                                             'flavor': flavor_id,
                                             'name': vm_name
                                         },
                                         cleanup='function')
    assert code == 0, "Failed to create heat stack"

    LOG.info("Verifying server creation via heat")
    vm_id = vm_helper.get_vm_id_from_name(vm_name='stress_ng', strict=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        LOG.tc_step("Check for Stress-ng inside vm")
        assert 0 == wait_for_stress_ng(vm_ssh), " Stress-ng is not running"

    for vm_actions in [['live_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #13
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    """
    Launch vm with vcpu model spec and cpu thread policy both set
    Args:
        vcpu_model (str):
        thread_policy (str):
        cpu_models_supported (tuple): fixture

    Test Steps:
        - create flavor with vcpu model and cpu thread extra specs set
        - boot vm from volume with above flavor
        - if no hyperthreaded host, check vm failed to schedule
        - otherwise check vcpu model and cpu thread policy both set as expected

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    is_supported = (vcpu_model == 'Passthrough'
                    or vcpu_model in all_cpu_models_supported)
    if not is_supported:
        skip("{} is not supported by any hypervisor".format(vcpu_model))

    name = '{}_{}'.format(vcpu_model, thread_policy)
    flv_id = nova_helper.create_flavor(name=name, vcpus=2)[1]
    ResourceCleanup.add('flavor', flv_id)
    nova_helper.set_flavor(flavor=flv_id,
                           **{
                               FlavorSpec.VCPU_MODEL: vcpu_model,
                               FlavorSpec.CPU_POLICY: 'dedicated',
                               FlavorSpec.CPU_THREAD_POLICY: thread_policy
                           })

    code, vm, msg = vm_helper.boot_vm(name=name,
                                      flavor=flv_id,
                                      fail_ok=True,
                                      cleanup='function')
    ht_hosts = host_helper.get_hypersvisors_with_config(hyperthreaded=True,
                                                        up_only=True)
    if thread_policy == 'require' and not ht_hosts:
        assert 1 == code

    else:
        assert 0 == code, "VM is not launched successfully"
        check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
        vm_host = vm_helper.get_vm_host(vm)
        check_helper.check_topology_of_vm(vm_id=vm,
                                          vcpus=2,
                                          cpu_pol='dedicated',
                                          cpu_thr_pol=thread_policy,
                                          numa_num=1,
                                          vm_host=vm_host)
Example #14
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
Example #15
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Example #16
def create_flavor_and_server_group(storage_backing=None, policy=None):
    LOG.tc_step("Create a flavor{}".format(' with {} aggregate'.format(
        storage_backing) if storage_backing else ''))
    flavor_id = nova_helper.create_flavor('srv_grp',
                                          storage_backing=storage_backing,
                                          cleanup='function')[1]

    srv_grp_id = None
    if policy is not None:
        LOG.tc_step(
            "Create a server group with policy set to {}".format(policy))
        srv_grp_id = nova_helper.create_server_group(policy=policy)[1]
        ResourceCleanup.add(resource_type='server_group',
                            resource_id=srv_grp_id)

    return flavor_id, srv_grp_id
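An illustrative call with assumed argument values:

flavor_id, srv_grp_id = create_flavor_and_server_group(storage_backing='local_image',
                                                       policy='anti-affinity')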
Example #17
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
Example #18
def create_shared_flavor(vcpus=2, storage_backing='local_image', cpu_policy='dedicated',
                         numa_nodes=None, node0=None, node1=None, shared_vcpu=None):
    flavor_id = nova_helper.create_flavor(name='shared_core', vcpus=vcpus, storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='function')

    LOG.tc_step("Add specific cpu_policy, number_of_numa_nodes, numa_node0, and shared_vcpu to flavor extra specs")
    extra_specs = {FlavorSpec.CPU_POLICY: cpu_policy}
    if numa_nodes is not None:
        extra_specs[FlavorSpec.NUMA_NODES] = numa_nodes
    if node0 is not None:
        extra_specs[FlavorSpec.NUMA_0] = node0
    if node1 is not None:
        extra_specs[FlavorSpec.NUMA_1] = node1
    if shared_vcpu is not None:
        extra_specs[FlavorSpec.SHARED_VCPU] = shared_vcpu

    nova_helper.set_flavor(flavor_id, **extra_specs)
    return flavor_id
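An illustrative call with assumed values: a 4-vcpu dedicated flavor spanning two numa nodes, with vcpu 0 as the shared vcpu:

flavor = create_shared_flavor(vcpus=4, numa_nodes=2, node0=0, node1=1, shared_vcpu=0)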
Example #19
def _boot_vm_vcpu_model(flv_model=None,
                        img_model=None,
                        boot_source='volume',
                        avail_zone=None,
                        vm_host=None):
    LOG.tc_step(
        "Attempt to launch vm from {} with image vcpu model metadata: {}; flavor vcpu model extra spec: {}"
        .format(boot_source, img_model, flv_model))

    flv_id = nova_helper.create_flavor(name='vcpu_{}'.format(flv_model))[1]
    ResourceCleanup.add('flavor', flv_id)
    if flv_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: flv_model})

    if img_model:
        image_id = glance_helper.create_image(
            name='vcpu_{}'.format(img_model),
            cleanup='function',
            **{ImageMetadata.CPU_MODEL: img_model})[1]
    else:
        image_id = glance_helper.get_guest_image(
            guest_os=GuestImages.DEFAULT['guest'])

    if boot_source == 'image':
        source_id = image_id
    else:
        source_id = cinder_helper.create_volume(name='vcpu_model',
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)

    code, vm, msg = vm_helper.boot_vm(name='vcpu_model',
                                      flavor=flv_id,
                                      source=boot_source,
                                      source_id=source_id,
                                      fail_ok=True,
                                      cleanup='function',
                                      avail_zone=avail_zone,
                                      vm_host=vm_host)
    return code, vm, msg
Example #20
def test_something(host_to_modify, storage_backing):
    """
    Test parametrize the test function instead of the test fixture.

    Args:
        host_to_modify (str): fixture that returns the hostname under test
        storage_backing (str): storage backing to configure

    Setups:
        - Select a host and record the storage backing before test starts    (module)

    Test Steps:
        - Modify host storage backing to given storage backing if not already on it
        - Create a flavor with specified storage backing
        - Boot vm from above flavor

    Teardown:
        - Delete created vm, volume, flavor
        - Modify host storage backing to its original config if not already on it     (module)

    """
    # Modify host storage backing with check_first=True, so it will not modify if already on that backing.
    # If lock_host() has to be done inside the test case, set swact=True so it handles the CPE case.
    LOG.tc_step(
        "Modify {} storage backing to {} if it does not already have that backing"
        .format(host_to_modify, storage_backing))
    host_helper.set_host_storage_backing(host_to_modify,
                                         inst_backing=storage_backing,
                                         check_first=True,
                                         lock=True,
                                         unlock=True)

    LOG.tc_step("Create a flavor with specified storage backing")
    flv_id = nova_helper.create_flavor(name='test_avoid_flv',
                                       storage_backing=storage_backing)[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flv_id)

    LOG.tc_step("Boot vm from above flavor")
    vm_id = vm_helper.boot_vm(name='test_avoid_vm', flavor=flv_id)[1]
    ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
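A minimal sketch of the function-level parametrization this example relies on; the decorator, fixture wiring, and backing values are assumptions for illustration:

from pytest import fixture, mark

@fixture(scope='module')
def host_to_modify():
    # select a host and record its original storage backing (module setup),
    # then restore that backing after all tests in the module (module teardown)
    ...

@mark.parametrize('storage_backing', ['local_image', 'local_lvm', 'remote'])
def test_something(host_to_modify, storage_backing):
    ...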
Example #21
def test_timing():
    threads = []
    flav_id = nova_helper.create_flavor('thread_testing')[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flav_id)
    start_1 = time()
    for i in range(0, 6):
        thread = MThread(vm_helper.boot_vm, 'threading_vm', flavor=flav_id)
        thread.start_thread(240)
        threads.append(thread)

    for thread in threads:
        thread.wait_for_thread_end()
    for thread in threads:
        ResourceCleanup.add(resource_type='vm',
                            resource_id=thread.get_output()[1])
    end_1 = time()

    start_2 = time()
    for i in range(0, 2):
        vm_id = vm_helper.boot_vm('loop_vm', flav_id)[1]
        ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
    end_2 = time()

    LOG.info("Time results:\n"
             "Multithreading: {}\n"
             "Single loop: {}".format(end_1 - start_1, end_2 - start_2))
Example #22
def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):

    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemera disk, {}M swap disk".
        format(vcpus, ephemeral, swap))

    flavor_id = nova_helper.create_flavor(name='flv_4k',
                                          ephemeral=ephemeral,
                                          swap=swap,
                                          vcpus=vcpus,
                                          storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id)

    specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'}

    if cpu_pol is not None:
        specs[FlavorSpec.CPU_POLICY] = cpu_pol

    LOG.tc_step("Add following extra specs: {}".format(specs))
    nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('4k_vm',
                              flavor=flavor_id,
                              source=boot_source,
                              cleanup='function')[1]
    __check_pagesize(vm_id)

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        vm_helper.attach_vol_to_vm(vm_id=vm_id)

    # make sure the VM is up and pingable from natbox
    LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    return vm_id
Example #23
def test_set_shared_vcpu_spec_reject(cpu_policy, vcpus, shared_vcpu):
    """
    Test set shared vcpu id to invalid value will be rejected.

    Args:
        cpu_policy (str): shared or dedicated
        vcpus (int): number of vcpus to set when creating flavor
        shared_vcpu (int): vcpu id to attempt to set to

    Test Steps:
        - Create flavor with given number of vcpus
        - Set cpu_policy extra spec to given value
        - Attempt to set shared vcpu id to specific value (invalid value)
        - Ensure cli is rejected

    Teardown:
        - Delete created flavor

    """
    LOG.tc_step("Create flavor with {} vcpus, and set cpu_policy to {}".format(vcpus, cpu_policy))

    flavor = nova_helper.create_flavor(vcpus=vcpus)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: cpu_policy})

    LOG.tc_step("Attempt to set shared_vcpu spec to invalid value - {} and verify it's rejected.".format(shared_vcpu))
    code, output = nova_helper.set_flavor(flavor, fail_ok=True, **{FlavorSpec.SHARED_VCPU: shared_vcpu})

    error_msg = 'undefined'
    if cpu_policy == 'shared':
        error_msg = SharedCPUErr.DEDICATED_CPU_REQUIRED
    elif shared_vcpu < 0:
        error_msg = SharedCPUErr.INVALID_VCPU_ID
    elif shared_vcpu >= vcpus:
        error_msg = SharedCPUErr.MORE_THAN_FLAVOR.format(shared_vcpu, vcpus)

    assert code == 1, "Set vcpu id cli should be rejected."
    assert error_msg in output, "Error message mismatch. Actual: {}".format(output)
Example #24
def test_vmx_setting():
    """
    Test that vmx feature can be set in guest VM.

    Test Steps:
       - Create a flavor with extra specs hw:wrs:nested_vmx=True and hw:cpu_model=<a cpu model supported by the host>
       - Instantiate a VM with the flavor and check that vm has correct vcpu model
       - ssh into the VM and execute "grep vmx /proc/cpuinfo" and verify that vmx feature is set
    """

    # Create a flavor with specs: hw:wrs:nested_vmx=True and extraspec hw:cpu_model=<compute host cpu model>

    host_cpu_model = 'Passthrough'
    LOG.tc_step("Create flavor for vcpu model {}".format(host_cpu_model))
    flavor_id = nova_helper.create_flavor(fail_ok=False)[1]
    ResourceCleanup.add('flavor', flavor_id)

    LOG.tc_step(
        "Set extra specs for flavor of vcpu model {}".format(host_cpu_model))
    extra_specs = {
        FlavorSpec.NESTED_VMX: True,
        FlavorSpec.VCPU_MODEL: host_cpu_model
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Create VM for vcpu model {}".format(host_cpu_model))
    code, vm, msg = vm_helper.boot_vm(flavor=flavor_id,
                                      cleanup='function',
                                      fail_ok=False)
    ResourceCleanup.add('vm', vm)
    LOG.tc_step("Check vcpu model is correct")
    host = vm_helper.get_vm_host(vm)
    expt_arch = host_helper.get_host_cpu_model(host)
    check_vm_cpu_model(vm_id=vm, vcpu_model='Passthrough', expt_arch=expt_arch)

    LOG.tc_step("Checking to see if 'vmx' is in /proc/cpuinfo")
    with vm_helper.ssh_to_vm_from_natbox(vm) as vm_ssh:
        vm_ssh.exec_cmd("grep vmx /proc/cpuinfo", fail_ok=False)
Example #25
    def test_resize_vm_shared_cpu_negative(self, vcpus, cpu_policy, shared_vcpu, basic_vm):
        """
        Test resize request is rejected if system does not meet the shared_cpu requirement(s) in the flavor

        Args:
            vcpus (int): number of vcpus in flavor
            cpu_policy (str): cpu_policy in flavor extra specs
            shared_vcpu (int):
            basic_vm (str): id of a basic vm to attempt resize on

        Setup:
            - Boot a basic vm (module)

        Test Steps:
            - Create a flavor with given number of vcpus
            - Set extra specs for cpu_policy, shared_vcpu
            - Attempt to resize the basic vm with the flavor
            - Ensure cli is rejected and proper error returned

        Teardowns:
            - Delete created vm and volume (module)

        """
        vm_id, storage_backing = basic_vm
        LOG.tc_step("Create a flavor with {} vcpus. Set extra specs with: {} cpu_policy, {} shared_vcpu".format(
                vcpus, cpu_policy, shared_vcpu))
        flavor = nova_helper.create_flavor(name='shared_cpu', vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor, scope='module')
        nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: cpu_policy})
        nova_helper.set_flavor(flavor, **{FlavorSpec.SHARED_VCPU: shared_vcpu})

        LOG.tc_step("Attempt to resize vm with invalid flavor, and verify resize request is rejected.")
        code, msg = vm_helper.resize_vm(vm_id, flavor, fail_ok=True)
        assert code == 1, "Resize vm request is not rejected"
        assert re.search(ResizeVMErr.SHARED_NOT_ENABLED.format('0'), msg)

        LOG.tc_step("Ensure VM is still pingable after resize reject")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
Example #26
def create_rt_flavor(vcpus, cpu_pol, cpu_rt, rt_mask, shared_vcpu, fail_ok=False,
                     storage_backing=None, numa_nodes=None, cpu_thread=None, min_vcpus=None):
    LOG.tc_step("Create a flavor with {} vcpus".format(vcpus))
    flv_id = nova_helper.create_flavor(name='cpu_rt_{}'.format(vcpus), vcpus=vcpus, storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flv_id)

    args = {
        FlavorSpec.CPU_POLICY: cpu_pol,
        FlavorSpec.CPU_REALTIME: cpu_rt,
        FlavorSpec.CPU_REALTIME_MASK: rt_mask,
        FlavorSpec.SHARED_VCPU: shared_vcpu,
        # FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.CPU_THREAD_POLICY: cpu_thread,
        # FlavorSpec.MIN_VCPUS: min_vcpus
    }

    extra_specs = {}
    for key, val in args.items():
        if val is not None:
            extra_specs[key] = val

    LOG.tc_step("Set flavor extra specs: {}".format(extra_specs))
    code, output = nova_helper.set_flavor(flv_id, fail_ok=fail_ok, **extra_specs)
    return flv_id, code, output
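Example #30 below calls this helper; an illustrative standalone call (values assumed) that keeps vcpu 0 out of the realtime set:

flv_id, code, output = create_rt_flavor(4, cpu_pol='dedicated', cpu_rt='yes',
                                        rt_mask='^0', shared_vcpu=None)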
Example #27
    def test_evacuate_vms_with_inst_backing(self, hosts_per_backing,
                                            storage_backing):
        """
        Test evacuate vms with various vm storage configs and host instance
        backing configs

        Args:
            storage_backing: storage backing under test

        Skip conditions:
            - Less than two hosts configured with storage backing under test

        Setups:
            - Add admin role to primary tenant (module)

        Test Steps:
            - Create flv_rootdisk without ephemeral or swap disks, and set
            storage backing extra spec
            - Create flv_ephemswap with ephemeral AND swap disks, and set
            storage backing extra spec
            - Boot following vms on same host and wait for them to be
            pingable from NatBox:
                - Boot vm1 from volume with flavor flv_rootdisk
                - Boot vm2 from volume with flavor flv_ephemswap
                - Boot vm3 from image with flavor flv_rootdisk
                - Boot vm4 from image with flavor flv_rootdisk, and attach a
                volume to it
                - Boot vm5 from image with flavor flv_ephemswap
            - sudo reboot -f on vms host
            - Ensure evacuation for all 5 vms are successful (vm host
            changed, active state, pingable from NatBox)

        Teardown:
            - Delete created vms, volumes, flavors
            - Remove admin role from primary tenant (module)

        """
        hosts = hosts_per_backing.get(storage_backing, [])
        if len(hosts) < 2:
            skip(
                SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
                    storage_backing))

        target_host = hosts[0]

        LOG.tc_step("Create a flavor without ephemeral or swap disks")
        flavor_1 = nova_helper.create_flavor(
            'flv_rootdisk', storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_1, scope='function')

        LOG.tc_step("Create another flavor with ephemeral and swap disks")
        flavor_2 = nova_helper.create_flavor(
            'flv_ephemswap',
            ephemeral=1,
            swap=512,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_2, scope='function')

        LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait "
                    "for it pingable from NatBox")
        vm1_name = "vol_root"
        vm1 = vm_helper.boot_vm(vm1_name,
                                flavor=flavor_1,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vms_info = {
            vm1: {
                'ephemeral': 0,
                'swap': 0,
                'vm_type': 'volume',
                'disks': vm_helper.get_vm_devices_via_virsh(vm1)
            }
        }
        vm_helper.wait_for_vm_pingable_from_natbox(vm1)

        LOG.tc_step("Boot vm2 from volume with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm2_name = "vol_ephemswap"
        vm2 = vm_helper.boot_vm(vm2_name,
                                flavor=flavor_2,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm2)
        vms_info[vm2] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'volume',
            'disks': vm_helper.get_vm_devices_via_virsh(vm2)
        }

        LOG.tc_step(
            "Boot vm3 from image with flavor flv_rootdisk and wait for "
            "it pingable from NatBox")
        vm3_name = "image_root"
        vm3 = vm_helper.boot_vm(vm3_name,
                                flavor=flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm3)
        vms_info[vm3] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm3)
        }

        LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a "
                    "volume to it and wait for it "
                    "pingable from NatBox")
        vm4_name = 'image_root_attachvol'
        vm4 = vm_helper.boot_vm(vm4_name,
                                flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vol = cinder_helper.create_volume(bootable=False)[1]
        ResourceCleanup.add('volume', vol, scope='function')
        vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False)

        vm_helper.wait_for_vm_pingable_from_natbox(vm4)
        vms_info[vm4] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image_with_vol',
            'disks': vm_helper.get_vm_devices_via_virsh(vm4)
        }

        LOG.tc_step("Boot vm5 from image with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm5_name = 'image_ephemswap'
        vm5 = vm_helper.boot_vm(vm5_name,
                                flavor_2,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm5)
        vms_info[vm5] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm5)
        }

        LOG.tc_step("Check all VMs are booted on {}".format(target_host))
        vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
        vms = [vm1, vm2, vm3, vm4, vm5]
        assert set(vms) <= set(vms_on_host), "VMs booted on host: {}. " \
                                             "Current vms on host: {}". \
            format(vms, vms_on_host)

        for vm_ in vms:
            LOG.tc_step("Touch files under vm disks {}: "
                        "{}".format(vm_, vms_info[vm_]))
            file_paths, content = touch_files_under_vm_disks(
                vm_, **vms_info[vm_])
            vms_info[vm_]['file_paths'] = file_paths
            vms_info[vm_]['content'] = content

        LOG.tc_step("Reboot target host {}".format(target_host))
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               ping_vms=True)

        LOG.tc_step("Check files after evacuation")
        for vm_ in vms:
            LOG.info("--------------------Check files for vm {}".format(vm_))
            check_helper.check_vm_files(vm_id=vm_,
                                        vm_action='evacuate',
                                        storage_backing=storage_backing,
                                        prev_host=target_host,
                                        **vms_info[vm_])
        vm_helper.ping_vms_from_natbox(vms)
Example #28
def test_set_shared_vcpu_spec(vcpu_id):
    flavor = nova_helper.create_flavor(name='shared_vcpus', vcpus=64)[1]
    ResourceCleanup.add('flavor', resource_id=flavor)
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: 'dedicated', FlavorSpec.SHARED_VCPU: vcpu_id})
Example #29
    def _test_shared_vcpu_pinning_constraints(self, vcpus, numa_nodes, numa_node0, shared_vcpu,
                                              add_shared_cpu, add_admin_role_func):
        """
        Tests the following:
        - That pinning constraints do not count on shared vCPU (TC5098)

        Test Setup:
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - enable shared CPU on a compute node
            - create flavor with shared vCPU
            - determine how many pcpus are available on a compute host
            - create a flavor with no shared vCPUs
                - sized so that after booting it, the number of available pcpus
                  equals the shared-CPU flavor's vCPU count minus 1
            - boot instances using flavor without shared CPU.
            - boot instance with shared CPU flavor
            - confirm (via vm-topology and virsh vcpupin) that shared_vcpu of instance is pinned to the shared pcpu
                -and the remaining vcpus are pinned to the available physical cpus from the previous step.

        Teardown:
            - Delete created vms and flavors
        """

        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        if max_vcpus_per_proc[numa_node0][0] < vcpus/numa_nodes \
                or max_vcpus_per_proc[0 if numa_node0 == 1 else 1][0] < vcpus - (vcpus/numa_nodes):
            skip("Less than {} VMs cores on numa node0 of any hypervisor with shared cpu".format(vcpus/numa_nodes))

        # make vm
        LOG.tc_step("Make a flavor with {} shared vcpus".format(vcpus))
        flavor_1 = create_shared_flavor(vcpus=vcpus, numa_nodes=numa_nodes, node0=numa_node0, shared_vcpu=shared_vcpu,
                                        storage_backing=storage_backing)
        ResourceCleanup.add('flavor', flavor_1)

        # select a compute node to use
        vm_host = max_vcpus_per_proc[numa_node0][1]

        # get the available vcpus on the selected numa node
        LOG.tc_step("Check how many vCPUs are available on the selected node")
        numa0_used_cpus, numa0_total_cpus = host_helper.get_vcpus_per_proc(vm_host)[vm_host][0]
        available_vcpus = len(numa0_total_cpus) - len(numa0_used_cpus)

        # create a flavor with no shared vcpu
        LOG.tc_step("Create a flavor with enough vcpus to fill the diff")
        no_share_flavor = nova_helper.create_flavor(vcpus=available_vcpus-(vcpus - 1))[1]
        ResourceCleanup.add('flavor', no_share_flavor)
        second_specs = {FlavorSpec.CPU_POLICY: 'dedicated', FlavorSpec.NUMA_NODES: numa_nodes,
                        FlavorSpec.NUMA_0: numa_node0}
        nova_helper.set_flavor(no_share_flavor, **second_specs)

        LOG.tc_step("boot vm with above flavor")
        vm_1 = vm_helper.boot_vm(flavor=no_share_flavor, cleanup='function', fail_ok=False, vm_host=vm_host)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_1)
        GuestLogs.add(vm_1)

        # get the available vcpus on the selected numa node
        LOG.tc_step("Check how many vCPUs are available on the selected node")
        numa0_used_cpus, numa0_total_cpus = host_helper.get_vcpus_per_proc(vm_host)[vm_host][0]
        available_vcpus = len(numa0_total_cpus) - len(numa0_used_cpus)
        assert available_vcpus == vcpus-1

        prev_total_vcpus = host_helper.get_vcpus_for_computes()

        LOG.tc_step("Boot a VM with the shared VCPU Flavor")
        vm_share = vm_helper.boot_vm(flavor=flavor_1, cleanup='function', fail_ok=False, vm_host=vm_host)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_share)
        GuestLogs.add(vm_share)

        check_shared_vcpu(vm=vm_share, vcpus=vcpus, prev_total_vcpus=prev_total_vcpus, shared_vcpu=shared_vcpu,
                          numa_nodes=numa_nodes, numa_node0=numa_node0)
Example #30
def test_cpu_realtime_vm_actions(vcpus, cpu_rt, rt_mask, rt_source, shared_vcpu, numa_nodes, cpu_thread, check_hosts):
    """
    Test vm with realtime cpu policy specified in flavor
    Args:
        vcpus (int):
        cpu_rt (str|None):
        rt_source (str): flavor or image
        rt_mask (str):
        shared_vcpu (int|None):
        numa_nodes (int|None): number of numa nodes to boot vm on
        cpu_thread (str|None):
        check_hosts (tuple): test fixture

    Setups:
        - check storage backing and whether system has shared cpu configured

    Test Steps:
        - Create a flavor with given cpu realtime, realtime mask and shared vcpu extra spec settings
        - Create a vm with above flavor
        - Verify cpu scheduler policies via virsh dumpxml and ps
        - Perform following nova actions and repeat above step after each action:
            ['suspend', 'resume'],
            ['live_migrate'],
            ['cold_migrate'],
            ['rebuild']

    """
    storage_backing, hosts_with_shared_cpu, ht_hosts = check_hosts

    if cpu_thread == 'require' and len(ht_hosts) < 2:
        skip("Less than two hyperthreaded hosts")

    if shared_vcpu is not None and len(hosts_with_shared_cpu) < 2:
        skip("Less than two up hypervisors configured with shared cpu")

    cpu_rt_flv = cpu_rt
    if rt_source == 'image':
        # rt_mask_flv = cpu_rt_flv = None
        rt_mask_flv = '^0'
        rt_mask_img = rt_mask
    else:
        rt_mask_flv = rt_mask
        rt_mask_img = None

    image_id = None
    if rt_mask_img is not None:
        image_metadata = {ImageMetadata.CPU_RT_MASK: rt_mask_img}
        image_id = glance_helper.create_image(name='rt_mask', cleanup='function', **image_metadata)[1]

    vol_id = cinder_helper.create_volume(source_id=image_id)[1]
    ResourceCleanup.add('volume', vol_id)

    name = 'rt-{}_mask-{}_{}vcpu'.format(cpu_rt, rt_mask_flv, vcpus)
    flv_id = create_rt_flavor(vcpus, cpu_pol='dedicated', cpu_rt=cpu_rt_flv, rt_mask=rt_mask_flv,
                              shared_vcpu=shared_vcpu, numa_nodes=numa_nodes, cpu_thread=cpu_thread,
                              storage_backing=storage_backing)[0]

    LOG.tc_step("Boot a vm with above flavor")
    vm_id = vm_helper.boot_vm(name=name, flavor=flv_id, cleanup='function', source='volume', source_id=vol_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    expt_rt_cpus, expt_ord_cpus = parse_rt_and_ord_cpus(vcpus=vcpus, cpu_rt=cpu_rt, cpu_rt_mask=rt_mask)

    check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu)
    vm_host = vm_helper.get_vm_host(vm_id)
    if shared_vcpu:
        assert vm_host in hosts_with_shared_cpu

    numa_num = 1 if numa_nodes is None else numa_nodes
    check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, vm_host=vm_host)

    expt_current_cpu = vcpus
    # if min_vcpus is not None:
    #     GuestLogs.add(vm_id)
    #     LOG.tc_step("Scale down cpu once")
    #     vm_helper.scale_vm(vm_id, direction='down', resource='cpu')
    #     vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    #
    #     LOG.tc_step("Check current vcpus in nova show is reduced after scale down")
    #     expt_current_cpu -= 1
    #     check_helper.check_vm_vcpus_via_nova_show(vm_id, min_vcpus, expt_current_cpu, vcpus)

    for actions in [['suspend', 'resume'], ['stop', 'start'], ['live_migrate'], ['cold_migrate'], ['rebuild']]:
        LOG.tc_step("Perform {} on vm and check realtime cpu policy".format(actions))
        for action in actions:
            kwargs = {}
            if action == 'rebuild':
                kwargs = {'image_id': image_id}
            vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host_post_action = vm_helper.get_vm_host(vm_id)
        if shared_vcpu:
            assert vm_host_post_action in hosts_with_shared_cpu

        LOG.tc_step("Check cpu thread policy in vm topology and vcpus in nova show after {}".format(actions))
        check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, numa_num=numa_num,
                                          vm_host=vm_host_post_action, current_vcpus=expt_current_cpu)

        check_virsh = True
        offline_cpu = None

        check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu,
                                               offline_cpus=offline_cpu, check_virsh_vcpusched=check_virsh)