Example #1
def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):
    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemera disk, {}M swap "
        "disk".format(vcpus, ephemeral, swap))
    flavor_id = nova_helper.create_flavor(
        name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus,
        storage_backing=storage_backing, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}

        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('migration_test',
                              flavor=flavor_id, source=boot_source,
                              reuse_vol=False,
                              cleanup='function')[1]

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False)

    return vm_id
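
The helper above has no caller shown in this snippet. Below is a minimal, illustrative sketch of how a parametrized migration test might drive it, assuming pytest parametrization and the module-level imports (nova_helper, vm_helper, LOG) used by the original examples; the parameter values and test name are made up.

import pytest


@pytest.mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol', 'vcpus', 'vm_type'), [
    ('local_image', 0, 0, 'dedicated', 2, 'volume'),          # illustrative values only
    ('local_image', 1, 512, None, 1, 'image_with_vol'),
])
def test_live_migrate_vm_sketch(storage_backing, ephemeral, swap, cpu_pol, vcpus, vm_type):
    # Boot the vm under test via the helper above, then live-migrate it and
    # verify it stays reachable (live_migrate_vm is used the same way in Example #23).
    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus, vm_type)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Live migrate the vm and check it is still pingable from natbox")
    vm_helper.live_migrate_vm(vm_id=vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)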
Example #2
def test_boot_windows_guest():
    """
    Boot a Windows guest to assist with manual testing on a Windows guest
    """
    # Change the following parameters to change the vm type.
    guest = 'win_2012'  # e.g. change to tis-centos-guest for a Linux guest
    storage = 'local_image'  # local_lvm, local_image, or remote
    boot_source = 'image'  # volume or image

    LOG.tc_step("Get/Create {} glance image".format(guest))
    glance_helper.get_guest_image(guest_os=guest)

    LOG.tc_step("Create flavor with {} storage backing".format(storage))
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4,
                                       ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    LOG.tc_step("Boot {} vm".format(guest))
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id,
                              guest_os=guest,
                              source=boot_source)[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        code, output = vm_ssh.exec_cmd('pwd', fail_ok=False)
        LOG.info(output)

    LOG.info(
        "{} is successfully booted from {} with {} storage backing".format(
            guest, boot_source, storage))
Example #3
def test_set_flavor_extra_specs(flavor_to_test, extra_spec_name, values):
    """
    Args:
        flavor_to_test:
        extra_spec_name:
        values:

    Setups:
        - Create a basic flavor

    Test Steps:
        - Set specific extra spec to given values for the basic flavor
        - Check extra spec is now included in the flavor

    Teardown:
        - Delete the basic flavor
    """
    for value in values:
        value = str(value)
        extra_spec = {extra_spec_name: value}

        LOG.tc_step("Set flavor extra spec to: {} and verify extra spec is set successfully.".format(extra_spec))
        nova_helper.set_flavor(flavor=flavor_to_test, **extra_spec)

        post_extra_spec = nova_helper.get_flavor_properties(flavor=flavor_to_test)
        assert post_extra_spec[extra_spec_name] == value, "Actual flavor extra specs: {}".format(post_extra_spec)
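
The flavor_to_test fixture referenced above is not included in this snippet. A minimal sketch of what it could look like follows, assuming a pytest fixture and reusing only the create/delete helpers seen in these examples (the fixture scope and flavor name are assumptions):

import pytest


@pytest.fixture()
def flavor_to_test(request):
    # Setup: create a basic flavor for the extra-spec test above.
    flavor_id = nova_helper.create_flavor(name='basic_flavor')[1]

    # Teardown: delete the basic flavor (same finalizer pattern as Examples #18 and #19).
    def delete():
        nova_helper.delete_flavors(flavor_id)

    request.addfinalizer(delete)
    return flavor_id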
Example #4
def create_rt_vm(hypervisor):
    global testable_hypervisors
    LOG.tc_step('Create/get glance image using rt guest image')
    image_id = glance_helper.get_guest_image(guest_os='tis-centos-guest-rt',
                                             cleanup='module')

    vcpu_count = VM_CPU_NUM
    non_rt_core = 0
    LOG.tc_step(
        'Create a flavor with specified cpu model, cpu policy, realtime mask, and 2M pagesize'
    )
    flavor_id, storage_backing = nova_helper.create_flavor(
        ram=1024, vcpus=vcpu_count, root_disk=2,
        storage_backing='local_image')[1:3]
    cpu_info = dict(testable_hypervisors[hypervisor]['cpu_info'])
    extra_specs = {
        FlavorSpec.VCPU_MODEL: cpu_info['model'],
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.CPU_REALTIME: 'yes',
        FlavorSpec.CPU_REALTIME_MASK: '^{}'.format(non_rt_core),
        FlavorSpec.MEM_PAGE_SIZE: 2048,
    }
    nova_helper.set_flavor(flavor_id, **extra_specs)

    LOG.tc_step(
        'Boot a VM with rt flavor and image on the targeted hypervisor: {}'.
        format(hypervisor))
    vm_id = vm_helper.boot_vm(flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              vm_host=hypervisor,
                              cleanup='function')[1]
    return vm_id, vcpu_count, non_rt_core
Example #5
def create_flavor(vm_type, flavor_type=None, name=core_flavor_name):
    global g_flavors

    extra_specs = {}

    if 'non_vtpm' in vm_type or (flavor_type and 'non_vtpm' in flavor_type):
        name += '_nonvtpm'
        extra_specs['sw:wrs:vtpm'] = 'false'
    else:
        extra_specs['sw:wrs:vtpm'] = 'true'

    if 'non_autorc' in vm_type or (flavor_type
                                   and 'non_autorc' in flavor_type):
        name += '_nonrc'
        extra_specs['sw:wrs:auto_recovery'] = 'false'

    elif 'autorc' in vm_type or (flavor_type and 'autorc' in flavor_type):
        name += '_autorc'
        extra_specs['sw:wrs:auto_recovery'] = 'true'

    flavor_id = nova_helper.create_flavor(name=name)[1]
    nova_helper.set_flavor(flavor_id, **extra_specs)

    if flavor_type is not None:
        g_flavors[flavor_type] = flavor_id
    else:
        g_flavors[vm_type] = flavor_id

    return flavor_id
Example #6
def test_flavor_set_storage(flavor_):
    """
    Test set flavor storage specs

    Test Setup:
        - Create a flavor
    Test Steps:
        - Set flavor storage spec to local_lvm and check it is set successfully
        - Set flavor storage spec to local_image and check it is set successfully
    Test Teardown:
        - Delete the created flavor
    """
    storage_spec = "aggregate_instance_extra_specs:storage"

    LOG.tc_step("Set flavor storage spec to local_lvm and check it is set successfully")
    local_lvm_spec = {storage_spec: "local_lvm"}
    nova_helper.set_flavor(flavor=flavor_, **local_lvm_spec)
    extra_spec_storage_1 = nova_helper.get_flavor_properties(flavor=flavor_)[storage_spec]
    assert extra_spec_storage_1 == 'local_lvm', "Actual storage spec: {}".format(extra_spec_storage_1)

    LOG.tc_step("Set flavor storage spec to local_image and check it is set successfully")
    local_lvm_spec = {storage_spec: "local_image"}
    nova_helper.set_flavor(flavor=flavor_, **local_lvm_spec)
    extra_spec_storage_2 = nova_helper.get_flavor_properties(flavor=flavor_)[storage_spec]
    assert extra_spec_storage_2 == 'local_image', "Actual storage spec: {}".format(extra_spec_storage_2)
Example #7
def test_2_nodes_set_guest_numa_node_value(flavor_2_nodes, cpu_policy, numa_0,
                                           numa_1):
    """
    Test set guest NUMA nodes values with 2 NUMA nodes.
    Args:
        flavor_2_nodes (str): id of a flavor with 2 numa nodes set in the extra spec
        cpu_policy (str): cpu policy to add to flavor
        numa_0 (int or str): cell id to assign to numa_node.0
        numa_1 (int or str): cell id to assign to numa_node.1

    Setup:
        - Create a flavor with number of numa nodes set to 2 in extra specs (module level)

    Test Steps:
        - Set cpu policy to given policy in flavor extra specs
        - Set guest numa nodes values in flavor extra specs and ensure it's set.

    Notes: Both guest numa nodes have to be set in one cli; otherwise the cli is rejected as expected.

    Teardown:
        - Delete created flavor (module level)

    """

    LOG.tc_step("Set flavor cpu_policy spec to {}.".format(cpu_policy))
    nova_helper.set_flavor(flavor=flavor_2_nodes,
                           **{FlavorSpec.CPU_POLICY: cpu_policy})

    args = {FlavorSpec.NUMA_0: numa_0, FlavorSpec.NUMA_1: numa_1}
    LOG.tc_step(
        "Set flavor numa_node spec(s) to {} and verify setting succeeded".
        format(args))
    nova_helper.set_flavor(flavor=flavor_2_nodes, **args)
Example #8
def _flavors(hosts_pci_device_info):
    """
    Creates all flavors required for this test module
    """
    # Create flavor using first device.
    pci_alias = list(hosts_pci_device_info.values())[0][0]['pci_alias']
    flavor_parms = {'flavor_qat_vf_1': [2, 1024, 2, 1],
                    'flavor_resize_qat_vf_1': [4, 2048, 2, 1],
                    'flavor_qat_vf_4': [2, 1024, 2, 4],
                    'flavor_resize_qat_vf_4': [2, 2048, 2, 4],
                    'flavor_qat_vf_32': [2, 1024, 2, 32],
                    'flavor_qat_vf_33': [2, 1024, 2, 33],
                    'flavor_none': [1, 1024, 2, 0],
                    'flavor_resize_none': [2, 2048, 2, 0],
                    'flavor_resize_qat_vf_32': [4, 2048, 2, 32],
                    }

    flavors = {}
    for k, v in flavor_parms.items():
        vf = v[3]
        LOG.fixture_step("Create a flavor with {} Coletro Creek crypto VF....".format(vf))
        flavor_id = nova_helper.create_flavor(name=k, vcpus=v[0], ram=v[1], root_disk=v[2])[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        if vf > 0:
            extra_spec = {FlavorSpec.PCI_PASSTHROUGH_ALIAS: '{}:{}'.format(pci_alias, vf),
                          # FlavorSpec.NUMA_NODES: '2',
                          # feature deprecated. May need to update test case as well.
                          FlavorSpec.CPU_POLICY: 'dedicated'}

            nova_helper.set_flavor(flavor_id, **extra_spec)
        flavors[k] = flavor_id

    return flavors
Example #9
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
Example #10
    def create_flavor_for_pci(self, vcpus=4, ram=1024):

        flavor_id = nova_helper.create_flavor(name='dedicated_pci_extras',
                                              vcpus=vcpus,
                                              ram=ram,
                                              cleanup='function')[1]

        pci_alias_spec = ('{}:{}'.format(self.pci_alias_names[0], self.pci_alias)
                          if self.pci_alias else None)
        LOG.tc_step('Set extra-specs to the flavor {}'.format(flavor_id))
        extra_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            # FlavorSpec.PCI_NUMA_AFFINITY: self.pci_numa_affinity,  # LP1854516
            FlavorSpec.PCI_PASSTHROUGH_ALIAS: pci_alias_spec,
            FlavorSpec.PCI_IRQ_AFFINITY_MASK: self.pci_irq_affinity_mask,
        }
        extra_specs = {k: str(v) for k, v in extra_specs.items()
                       if v is not None}

        if extra_specs:
            nova_helper.set_flavor(flavor_id, **extra_specs)

        return flavor_id
Example #11
def vif_model_check(request):
    vif_model = request.param

    LOG.fixture_step(
        "Get a network that supports {} to boot vm".format(vif_model))
    pci_net = network_helper.get_pci_vm_network(pci_type=vif_model,
                                                net_name='internal0-net')
    if not pci_net:
        skip(SkipHostIf.PCI_IF_UNAVAIL)

    extra_pcipt_net_name = extra_pcipt_net = None
    if not isinstance(pci_net, str):
        pci_net, extra_pcipt_net_name = pci_net
    LOG.info("PCI network selected to boot vm: {}".format(pci_net))

    LOG.fixture_step("Create a flavor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated',
                                          ram=2048,
                                          cleanup='module')[1]
    extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.fixture_step("Boot a base vm with above flavor and virtio nics")

    mgmt_net_id = network_helper.get_mgmt_net_id()
    pci_net_id, seg_id, pnet_name = network_helper.get_network_values(
        network=pci_net,
        fields=('id', 'provider:segmentation_id', 'provider:physical_network'))

    nics = [{'net-id': mgmt_net_id}, {'net-id': pci_net_id}]
    nics_to_test = [{'net-id': mgmt_net_id},
                    {'net-id': pci_net_id, 'vif-model': vif_model}]
    pcipt_seg_ids = {}
    if vif_model == 'pci-passthrough':
        pcipt_seg_ids[pci_net] = seg_id
        if extra_pcipt_net_name:
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            nics.append({'net-id': extra_pcipt_net})
            nics_to_test.append({'net-id': extra_pcipt_net,
                                 'vif-model': vif_model})
            pcipt_seg_ids[extra_pcipt_net_name] = seg_id

    base_vm = vm_helper.boot_vm(flavor=flavor_id, nics=nics,
                                cleanup='module')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
    vm_helper.ping_vms_from_vm(base_vm,
                               base_vm,
                               net_types=['mgmt', 'internal'])

    return vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids, pnet_name, extra_pcipt_net
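
A test consuming the vif_model_check fixture above would unpack the returned tuple. The following is a rough, illustrative sketch only; the test name and body are assumptions, and only helpers already used in these examples are called.

def test_vif_model_sketch(vif_model_check):
    # Unpack the fixture return values in the same order as the return statement above.
    vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids, pnet_name, \
        extra_pcipt_net = vif_model_check

    LOG.tc_step("Boot a vm with {} vif-model nic and the dedicated-cpu flavor".format(vif_model))
    vm_under_test = vm_helper.boot_vm(flavor=flavor_id, nics=nics_to_test,
                                      cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)
    vm_helper.ping_vms_from_vm(vm_under_test, base_vm,
                               net_types=['mgmt', 'internal'])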
Example #12
def _create_flavor_vcpu_model(vcpu_model, root_disk_size=None):
    flv_id = nova_helper.create_flavor(name='vcpu_model_{}'.format(vcpu_model),
                                       root_disk=root_disk_size)[1]
    ResourceCleanup.add('flavor', flv_id)
    if vcpu_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: vcpu_model})

    return flv_id
Example #13
def flavors():
    flvs = {}
    for numa in ['0', '1']:
        numa_flv = nova_helper.create_flavor(name='numa{}'.format(numa), vcpus=2)[1]
        # ResourceCleanup.add('flavor', numa_flv, scope='module')
        flvs['numa{}'.format(numa)] = numa_flv
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated', FlavorSpec.NUMA_0: numa}
        nova_helper.set_flavor(numa_flv, **extra_specs)

    return flvs
Example #14
def test_create_flavor(name, swap, ephemeral, storage, cpu_policy):
    flavor_id = nova_helper.create_flavor(name=name,
                                          swap=swap,
                                          ephemeral=ephemeral)[1]
    LOG.info("Flavor id: {}".format(flavor_id))
    specs = {
        'aggregate_instance_extra_specs:storage': storage,
        'hw:cpu_policy': cpu_policy
    }
    nova_helper.set_flavor(flavor=flavor_id, **specs)
Example #15
def test_migration_auto_converge(no_simplex):
    """
    Auto converge a VM with stress-ng running

    Test Steps:
        - Create flavor
        - Create a heat stack (launch a vm with stress-ng)
        - Perform live-migration and verify connectivity

    Test Teardown:
        - Delete created stacks, vms and flavors

    """

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=3)[1]
    ResourceCleanup.add('flavor', flavor_id)

    # Add live-migration timeout to the flavor
    extra_specs = {FlavorSpec.LIVE_MIG_TIME_OUT: 300}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Get the heat file name to use")
    heat_template = _get_stress_ng_heat()

    stack_name = vm_name = 'stress_ng'
    LOG.tc_step("Creating heat stack")
    code, msg = heat_helper.create_stack(stack_name=stack_name,
                                         template=heat_template,
                                         parameters={
                                             'flavor': flavor_id,
                                             'name': vm_name
                                         },
                                         cleanup='function')
    assert code == 0, "Failed to create heat stack"

    LOG.info("Verifying server creation via heat")
    vm_id = vm_helper.get_vm_id_from_name(vm_name='stress_ng', strict=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        LOG.tc_step("Check for Stress-ng inside vm")
        assert 0 == wait_for_stress_ng(vm_ssh), "stress-ng is not running"

    for vm_actions in [['live_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #16
    def flavor_mem_page_size(self, request, flavor_2g):
        flavor_id = flavor_2g[0]
        mem_page_size = request.param
        skip_4k_for_ovs(mem_page_size)

        if mem_page_size is None:
            nova_helper.unset_flavor(flavor_id, FlavorSpec.MEM_PAGE_SIZE)
        else:
            nova_helper.set_flavor(flavor_id,
                                   **{FlavorSpec.MEM_PAGE_SIZE: mem_page_size})

        return mem_page_size
Example #17
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    """
    Launch vm with vcpu model spec and cpu thread policy both set
    Args:
        vcpu_model (str):
        thread_policy (str):
        cpu_models_supported (tuple): fixture

    Test Steps:
        - create flavor with vcpu model and cpu thread extra specs set
        - boot vm from volume with above flavor
        - if no hyperthreaded host, check vm failed to schedule
        - otherwise check vcpu model and cpu thread policy both set as expected

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    is_supported = (vcpu_model == 'Passthrough' or
                    vcpu_model in all_cpu_models_supported)
    if not is_supported:
        skip("{} is not supported by any hypervisor".format(vcpu_model))

    name = '{}_{}'.format(vcpu_model, thread_policy)
    flv_id = nova_helper.create_flavor(name=name, vcpus=2)[1]
    ResourceCleanup.add('flavor', flv_id)
    nova_helper.set_flavor(flavor=flv_id,
                           **{
                               FlavorSpec.VCPU_MODEL: vcpu_model,
                               FlavorSpec.CPU_POLICY: 'dedicated',
                               FlavorSpec.CPU_THREAD_POLICY: thread_policy
                           })

    code, vm, msg = vm_helper.boot_vm(name=name,
                                      flavor=flv_id,
                                      fail_ok=True,
                                      cleanup='function')
    ht_hosts = host_helper.get_hypersvisors_with_config(hyperthreaded=True,
                                                        up_only=True)
    if thread_policy == 'require' and not ht_hosts:
        assert 1 == code

    else:
        assert 0 == code, "VM is not launched successfully"
        check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
        vm_host = vm_helper.get_vm_host(vm)
        check_helper.check_topology_of_vm(vm_id=vm,
                                          vcpus=2,
                                          cpu_pol='dedicated',
                                          cpu_thr_pol=thread_policy,
                                          numa_num=1,
                                          vm_host=vm_host)
Example #18
def flavor_2_nodes(request):
    """
    Create basic flavor with 2 vcpus
    """
    flavor = nova_helper.create_flavor('two_numa_nodes', vcpus=2)[1]
    nova_helper.set_flavor(flavor, **{FlavorSpec.NUMA_NODES: 2})

    def delete():
        nova_helper.delete_flavors(flavor)

    request.addfinalizer(delete)

    return flavor
Example #19
def flavor_1_node(request):
    """
    Create basic flavor with 2 vcpus and 1 numa node
    """
    flavor = nova_helper.create_flavor('one_numa_node', vcpus=2)[1]
    nova_helper.set_flavor(flavor, **{FlavorSpec.NUMA_NODES: 1})

    def delete():
        nova_helper.delete_flavors(flavor)

    request.addfinalizer(delete)

    return flavor
Example #20
def test_2_nodes_set_numa_node_values_reject(flavor_2_nodes, cpu_policy,
                                             numa_0, numa_1):
    LOG.tc_step("Set flavor cpu_policy spec to {}.".format(cpu_policy))
    nova_helper.set_flavor(flavor=flavor_2_nodes,
                           **{FlavorSpec.CPU_POLICY: cpu_policy})

    args = {FlavorSpec.NUMA_0: numa_0, FlavorSpec.NUMA_1: numa_1}
    LOG.tc_step(
        "Attempt to set flavor numa_node spec(s) to {} and verify setting is rejected"
        .format(args))
    code, msg = nova_helper.set_flavor(flavor=flavor_2_nodes,
                                       fail_ok=True,
                                       **args)
    assert 1 == code
Example #21
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id, 'vif-model': vif_model},
                {'net-id': internal_net_id, 'vif-model': vif_model}]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
Example #22
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli to be rejected with no vm booted. Actual: {}".format(msg)
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Example #23
def live_migrate_vm(end_time, end_event):
    ded_flv = nova_helper.create_flavor(name='dedicated', vcpus=2)[1]
    nova_helper.set_flavor(ded_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_id = vm_helper.boot_vm(name='live-mig', flavor=ded_flv)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    while time.time() < end_time:
        if end_event.is_set():
            assert 0, "Other thread failed. Terminate live-mgiration thread."

        time.sleep(15)
        LOG.tc_step("Live migrate live-mig vm")
        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
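
live_migrate_vm above is written to run in a worker thread driven by an end time and a shared stop event. A minimal launcher sketch using only the standard library is shown below; the duration and thread wiring are illustrative assumptions, not the project's actual runner.

import threading
import time


def run_live_migrate_worker(duration=1800):
    # Event the worker polls each iteration; set it from another thread to abort early.
    end_event = threading.Event()
    end_time = time.time() + duration

    worker = threading.Thread(target=live_migrate_vm, args=(end_time, end_event))
    worker.start()
    return worker, end_event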
Example #24
def test_1_node_set_guest_numa_node_value_invalid(flavor_1_node,
                                                  numa_node_spec):
    """
    Attempt to set guest NUMA node to invalid cell value, and ensure it's rejected.

    Args:
        flavor_1_node (str): id of flavor with NUMA nodes number set to 1 in extra specs
        numa_node_spec (dict): guest numa node spec to set

    Setup:
        - Create a flavor with number of numa nodes set to 1 in extra specs (module level)

    Test Steps:
        - Attempt to set guest NUMA node spec with invalid value and ensure it's rejected.

    Teardown:
        - Delete created flavor (module level)

    """

    LOG.tc_step(
        "Attempt to set flavor numa_node spec(s) to {} and verify cli is rejected."
        .format(numa_node_spec))
    code, output = nova_helper.set_flavor(flavor=flavor_1_node,
                                          fail_ok=True,
                                          **numa_node_spec)
    assert code == 1, "Expect nova flavor-key set cli to be rejected. Actual: {}".format(
        output)
Example #25
def test_0_node_set_guest_numa_node_value_reject(flavor_0_node):
    """
    Test set numa_node.1 is rejected when number of NUMA nodes is not set in extra specs.

    Args:
        flavor_0_node (str): id of flavor with 1 vcpu and without specifying hw:numa_nodes spec.

    Setup:
        - Create a flavor with 1 vcpu and number of numa nodes unset in extra specs (module level)

    Test Steps:
        - Attempt to set guest NUMA node 1 (hw:numa_node.1) and ensure it's rejected.

    Teardown:
        - Delete created flavor (module level)

    """
    numa_node_spec_0 = {FlavorSpec.NUMA_1: 0}

    LOG.tc_step(
        "Attempt to set guest numa node extra spec without numa_nodes extra spec, and verify cli is rejected."
    )
    code, output = nova_helper.set_flavor(flavor=flavor_0_node,
                                          fail_ok=True,
                                          **numa_node_spec_0)
    assert 1 == code, "Expect nova flavor-key set cli to be rejected. Actual: {}".format(
        output)
Example #26
def test_flavor_setting_numa_negative(vcpus, vswitch_affinity, numa_nodes,
                                      numa0, numa0_cpus, numa0_mem, numa1,
                                      numa1_cpus, numa1_mem, expt_err):

    LOG.tc_step("Create a 1024ram flavor with {} vcpus".format(vcpus))
    name = 'vswitch_affinity_{}_1G_{}cpu'.format(vswitch_affinity, vcpus)
    flv_id = nova_helper.create_flavor(name=name, vcpus=vcpus, ram=1024)[1]
    ResourceCleanup.add('flavor', flv_id)

    specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.VSWITCH_NUMA_AFFINITY: vswitch_affinity
    }
    tmp_dict = {
        FlavorSpec.NUMA_0: numa0,
        FlavorSpec.NUMA0_CPUS: numa0_cpus,
        FlavorSpec.NUMA0_MEM: numa0_mem,
        FlavorSpec.NUMA_1: numa1,
        FlavorSpec.NUMA1_CPUS: numa1_cpus,
        FlavorSpec.NUMA1_MEM: numa1_mem
    }

    for key, val in tmp_dict.items():
        if val is not None:
            specs[key] = val

    LOG.tc_step(
        "Attempt to set following extra spec to flavor {} and ensure it's rejected: {}"
        .format(flv_id, specs))
    code, output = nova_helper.set_flavor(flv_id, fail_ok=True, **specs)
    assert 1 == code, "Invalid extra spec is not rejected. Details: {}".format(
        output)
    assert eval(expt_err) in output, "Expected error message is not found"
Example #27
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
Example #28
def setups(no_simplex):
    vm_helper.ensure_vms_quotas(vms_num=10, cores_num=20, vols_num=10)
    storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    if len(hosts) < 2:
        skip("Less than two hosts with in same storage aggregate")

    LOG.fixture_step("Create a flavor with server group messaging enabled")
    flavor_id = nova_helper.create_flavor('srv_grp_msg', storage_backing=storage_backing)[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.SRV_GRP_MSG: True})

    LOG.fixture_step("Create affinity and anti-affinity server groups")
    affinity_grp = nova_helper.create_server_group(policy='affinity')[1]

    policy = 'soft_anti_affinity' if len(hosts) < 3 else 'anti_affinity'
    anti_affinity_grp = nova_helper.create_server_group(policy=policy)[1]

    return hosts, flavor_id, {'affinity': affinity_grp, 'anti_affinity': anti_affinity_grp}
Example #29
def create_shared_flavor(vcpus=2, storage_backing='local_image', cpu_policy='dedicated',
                         numa_nodes=None, node0=None, node1=None, shared_vcpu=None):
    flavor_id = nova_helper.create_flavor(name='shared_core', vcpus=vcpus, storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='function')

    LOG.tc_step("Add specific cpu_policy, number_of_numa_nodes, numa_node0, and shared_vcpu to flavor extra specs")
    extra_specs = {FlavorSpec.CPU_POLICY: cpu_policy}
    if numa_nodes is not None:
        extra_specs[FlavorSpec.NUMA_NODES] = numa_nodes
    if node0 is not None:
        extra_specs[FlavorSpec.NUMA_0] = node0
    if node1 is not None:
        extra_specs[FlavorSpec.NUMA_1] = node1
    if shared_vcpu is not None:
        extra_specs[FlavorSpec.SHARED_VCPU] = shared_vcpu

    nova_helper.set_flavor(flavor_id, **extra_specs)
    return flavor_id
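
A typical consumer of create_shared_flavor would boot a vm with the returned flavor; a brief, illustrative sketch reusing only helpers shown above (the vm name and spec values are assumptions):

def boot_vm_with_shared_vcpu_sketch():
    # Request 2 dedicated vcpus with vcpu index 1 marked as the shared vcpu.
    flavor_id = create_shared_flavor(vcpus=2, cpu_policy='dedicated', shared_vcpu=1)

    vm_id = vm_helper.boot_vm(name='shared_vcpu_vm', flavor=flavor_id,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    return vm_id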
Example #30
def _boot_vm_vcpu_model(flv_model=None,
                        img_model=None,
                        boot_source='volume',
                        avail_zone=None,
                        vm_host=None):
    LOG.tc_step(
        "Attempt to launch vm from {} with image vcpu model metadata: {}; flavor vcpu model extra spec: {}"
        .format(boot_source, img_model, flv_model))

    flv_id = nova_helper.create_flavor(name='vcpu_{}'.format(flv_model))[1]
    ResourceCleanup.add('flavor', flv_id)
    if flv_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: flv_model})

    if img_model:
        image_id = glance_helper.create_image(
            name='vcpu_{}'.format(img_model),
            cleanup='function',
            **{ImageMetadata.CPU_MODEL: img_model})[1]
    else:
        image_id = glance_helper.get_guest_image(
            guest_os=GuestImages.DEFAULT['guest'])

    if boot_source == 'image':
        source_id = image_id
    else:
        source_id = cinder_helper.create_volume(name='vcpu_model',
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)

    code, vm, msg = vm_helper.boot_vm(name='vcpu_model',
                                      flavor=flv_id,
                                      source=boot_source,
                                      source_id=source_id,
                                      fail_ok=True,
                                      cleanup='function',
                                      avail_zone=avail_zone,
                                      vm_host=vm_host)
    return code, vm, msg
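
_boot_vm_vcpu_model returns the fail_ok triplet from vm_helper.boot_vm, so callers branch on the return code. A minimal, illustrative sketch of such a caller is below; the expected-failure handling is an assumption modelled on the pattern in Example #22.

def check_vcpu_model_boot_sketch(flv_model, img_model, expect_fail=False):
    code, vm_id, msg = _boot_vm_vcpu_model(flv_model=flv_model, img_model=img_model)

    if expect_fail:
        # A conflicting flavor/image vcpu model combination is expected to be rejected.
        assert code > 0, "VM boot was not rejected. Details: {}".format(msg)
        return None

    assert 0 == code, "VM failed to boot. Details: {}".format(msg)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    return vm_id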