Example #1
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
Example #2
def base_vm():
    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, {'net-id': internal_net_id}, {'net-id': tenant_net_id}]

    vm_id = vm_helper.boot_vm(name='base_vm', nics=nics, cleanup='module')[1]

    return vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id
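A hypothetical consumer of the snippet above, assuming base_vm is registered as a module-scoped pytest fixture (the test name and the unpacking below are illustrative only, not taken from the original module):

def test_ping_base_vm(base_vm):
    # base_vm is assumed to be the fixture above, returning the 6-tuple it builds
    vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    # reuse the same ping pattern the other examples use
    vm_helper.ping_vms_from_vm(vm_id, vm_id, net_types=['mgmt', 'data', 'internal'])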
Example #3
def _test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info):
    """
    Verify guest can be launched with multiple crypto VFs, AVP, VIRTIO, and SRIOV interfaces.
    Verify max number of crypto VFs, verify beyond the limit (max is 32) and VM Maintenance
    activity.
    Args:
        vfs:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Launching a VM with flavor flavor_qat_vf_{}".format(vfs))
    vm_name = 'vm_with_{}_vf_pci_device'.format(vfs)
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type},
            {'net-id': internal_net_id, 'vif-model': vif_type}]

    if vfs == 33:
        LOG.tc_step("Verifying VM with over-limit crypto VFs={} cannot be launched .....".format(vfs))
    else:
        LOG.tc_step("Verifying VM with maximum crypto VFs={} .....".format(vfs))

    LOG.info("Boot a vm {} with pci-sriov nics, and flavor=flavor_qat_vf_{}".format(vm_name, vfs))
    flavor_id = _flavors['flavor_qat_vf_{}'.format(vfs)]
    rc, vm_id, msg = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function', fail_ok=True)

    if vfs == 33:
        assert rc != 0, "Unexpected: VM was launched with over-limit crypto vfs: {}".format(msg)
    else:
        assert rc == 0, "VM is not successfully launched. Details: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {host_dev_name: vfs}
        # 32 qat-vfs take more than 1.5 hours to run the tests
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, run_cpa=False)

        _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors, vfs=vfs)
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, timeout=14400)
Example #4
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
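A minimal usage sketch for launch_vm (hypothetical values; assumes the lab supports both vm types and has a compute node named 'compute-0'):

# Illustrative only: boot a 4-vcpu vhost guest on any host, then a 2-vcpu avp guest
# pinned to a specific compute (the host name is an assumption).
vhost_vm = launch_vm('vhost', num_vcpu=4)
avp_vm = launch_vm('avp', num_vcpu=2, host='compute-0')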
Example #5
def test_boot_vms():

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id
    }]

    for guest_os in ['ubuntu_14', 'cgcs-guest']:
        glance_helper.get_guest_image(guest_os)
        vm_id = vm_helper.boot_vm(guest_os=guest_os, nics=nics)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        # brief pause before in-guest pings (presumably to let the guest finish bringing up all interfaces)
        time.sleep(30)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_id,
                                   net_types=['mgmt', 'data', 'internal'])
Example #6
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 coleto creek pci device on system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # number of vms to launch to max out the total configured device VFs. Each VM is launched with 4 Vfs. 4 Vfs in each
    # compute are reserved for resize nova action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name=vm_name, nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # vm_host = nova_helper.get_vm_host(vm_id_)
        # total, used = network_helper.get_pci_device_vfs_counts_for_host(vm_host, vf_device_id)[0]
        # if (total - int(used)) >= 4:
        #     expt_res = 0

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # else:
        #     expt_res = 1
        #     LOG.info("Resizing of vm {} skipped; host {} max out vfs; used vfs = {}".format(vm_name_, vm_host, used))

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
Example #7
def test_kpi_vm_launch_migrate_rebuild(ixia_required, collect_kpi, hosts_per_backing, boot_from):
    """
    KPI test - vm startup time.
    Args:
        collect_kpi:
        hosts_per_backing:
        boot_from:

    Test Steps:
        - Create a flavor with 2 vcpus, dedicated cpu policy and storage backing (if boot-from-image)
        - Launch a vm from specified boot source
        - Collect the vm startup time via event log

    """
    if not collect_kpi:
        skip("KPI-only test. Skipped since KPI collection is not enabled.")

    # vm launch KPI
    if boot_from != 'volume':
        storage_backing = boot_from
        hosts = hosts_per_backing.get(boot_from)
        if not hosts:
            skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(boot_from))

        target_host = hosts[0]
        LOG.tc_step("Clear local storage cache on {}".format(target_host))
        storage_helper.clear_local_storage_cache(host=target_host)

        LOG.tc_step("Create a flavor with 2 vcpus, dedicated cpu policy, and {} storage".format(storage_backing))
        boot_source = 'image'
        flavor = nova_helper.create_flavor(name=boot_from, vcpus=2, storage_backing=storage_backing)[1]
    else:
        target_host = None
        boot_source = 'volume'
        storage_backing = keywords.host_helper.get_storage_backing_with_max_hosts()[0]
        LOG.tc_step("Create a flavor with 2 vcpus, and dedicated cpu policy and {} storage".format(storage_backing))
        flavor = nova_helper.create_flavor(vcpus=2, storage_backing=storage_backing)[1]

    ResourceCleanup.add('flavor', flavor)
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    host_str = ' on {}'.format(target_host) if target_host else ''
    LOG.tc_step("Boot a vm from {}{} and collect vm startup time".format(boot_from, host_str))

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id},
            {'net-id': internal_net_id}]

    vm_id = vm_helper.boot_vm(boot_from, flavor=flavor, source=boot_source, nics=nics, cleanup='function')[1]

    code_boot, out_boot = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VmStartup.NAME.format(boot_from),
                                  log_path=VmStartup.LOG_PATH, end_pattern=VmStartup.END.format(vm_id),
                                  start_pattern=VmStartup.START.format(vm_id), uptime=1)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    # Migration KPI
    if ('ixia_ports' in ProjVar.get_var("LAB")) and (len(hosts_per_backing.get(storage_backing)) >= 2):

        LOG.info("Run migrate tests when more than 2 {} hosts available".format(storage_backing))
        LOG.tc_step("Launch an observer vm")

        mgmt_net_observer = network_helper.get_mgmt_net_id(auth_info=Tenant.get_secondary())
        tenant_net_observer = network_helper.get_tenant_net_id(auth_info=Tenant.get_secondary())
        nics_observer = [{'net-id': mgmt_net_observer},
                         {'net-id': tenant_net_observer},
                         {'net-id': internal_net_id}]
        vm_observer = vm_helper.boot_vm('observer', flavor=flavor, source=boot_source,
                                        nics=nics_observer, cleanup='function', auth_info=Tenant.get_secondary())[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm_observer)
        vm_helper.setup_kernel_routing(vm_observer)
        vm_helper.setup_kernel_routing(vm_id)
        vm_helper.route_vm_pair(vm_observer, vm_id)

        if 'local_lvm' != boot_from:
            # live migration unsupported for boot-from-image vm with local_lvm storage
            LOG.tc_step("Collect live migrate KPI for vm booted from {}".format(boot_from))

            def operation_live(vm_id_):
                code, msg = vm_helper.live_migrate_vm(vm_id=vm_id_)
                assert 0 == code, msg
                vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
                # kernel routing
                vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

            time.sleep(30)
            duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_live, vm_id)
            assert duration > 0, "No traffic loss detected during live migration for {} vm".format(boot_from)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NAME.format(boot_from),
                                      kpi_val=duration, uptime=1, unit='Time(ms)')

            vim_duration = vm_helper.get_live_migrate_duration(vm_id=vm_id)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NOVA_NAME.format(boot_from),
                                      kpi_val=vim_duration, uptime=1, unit='Time(s)')

        LOG.tc_step("Collect cold migrate KPI for vm booted from {}".format(boot_from))

        def operation_cold(vm_id_):
            code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

        time.sleep(30)
        duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_cold, vm_id)
        assert duration > 0, "No traffic loss detected during cold migration for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

        vim_duration = vm_helper.get_cold_migrate_duration(vm_id=vm_id)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NOVA_NAME.format(boot_from),
                                  kpi_val=vim_duration, uptime=1, unit='Time(s)')

    # Rebuild KPI
    if 'volume' != boot_from:
        LOG.info("Run rebuild test for vm booted from image")

        def operation_rebuild(vm_id_):
            code, msg = vm_helper.rebuild_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_vms_from_vm(vm_id, vm_id, net_types=('data', 'internal'))

        LOG.tc_step("Collect vm rebuild KPI for vm booted from {}".format(boot_from))
        time.sleep(30)
        duration = vm_helper.get_ping_loss_duration_on_operation(vm_id, 300, 0.5, operation_rebuild, vm_id)
        assert duration > 0, "No ping loss detected during rebuild for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=Rebuild.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

    # Check the vm boot result at the end after collecting other KPIs
    assert code_boot == 0, out_boot
Example #8
    def base_setup_pci(self):
        LOG.fixture_step(
            "(class) Get an internal network that supports both pci-sriov and "
            "pcipt vif to boot vm")
        avail_pcipt_nets, is_cx4 = network_helper.get_pci_vm_network(
            pci_type='pci-passthrough',
            net_name='internal0-net', rtn_all=True)
        avail_sriov_nets, _ = network_helper.get_pci_vm_network(
            pci_type='pci-sriov',
            net_name='internal0-net', rtn_all=True)

        if not avail_pcipt_nets and not avail_sriov_nets:
            skip(SkipHostIf.PCI_IF_UNAVAIL)

        avail_nets = list(set(avail_pcipt_nets) & set(avail_sriov_nets))
        extra_pcipt_net = avail_pcipt_net = avail_sriov_net = None
        pcipt_seg_ids = {}
        if avail_nets:
            avail_net_name = avail_nets[-1]
            avail_net, segment_id = network_helper.get_network_values(
                network=avail_net_name,
                fields=('id', 'provider:segmentation_id'))
            internal_nets = [avail_net]
            pcipt_seg_ids[avail_net_name] = segment_id
            avail_pcipt_net = avail_sriov_net = avail_net
            LOG.info(
                "Internal network(s) selected for pcipt and sriov: {}".format(
                    avail_net_name))
        else:
            LOG.info("No internal network support both sriov and pcipt")
            internal_nets = []
            if avail_pcipt_nets:
                avail_pcipt_net_name = avail_pcipt_nets[-1]
                avail_pcipt_net, segment_id = network_helper.get_network_values(
                    network=avail_pcipt_net_name,
                    fields=('id', 'provider:segmentation_id'))
                internal_nets.append(avail_pcipt_net)
                pcipt_seg_ids[avail_pcipt_net_name] = segment_id
                LOG.info("pci-passthrough net: {}".format(avail_pcipt_net_name))
            if avail_sriov_nets:
                avail_sriov_net_name = avail_sriov_nets[-1]
                avail_sriov_net = network_helper.get_net_id_from_name(
                    avail_sriov_net_name)
                internal_nets.append(avail_sriov_net)
                LOG.info("pci-sriov net: {}".format(avail_sriov_net_name))

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        base_nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}]
        nics = base_nics + [{'net-id': net_id} for net_id in internal_nets]

        if avail_pcipt_nets and is_cx4:
            extra_pcipt_net_name = avail_nets[0] if avail_nets else \
                avail_pcipt_nets[0]
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            if extra_pcipt_net not in internal_nets:
                nics.append({'net-id': extra_pcipt_net})
                pcipt_seg_ids[extra_pcipt_net_name] = seg_id

        LOG.fixture_step("(class) Create a flavor with dedicated cpu policy.")
        flavor_id = \
            nova_helper.create_flavor(name='dedicated', vcpus=2, ram=2048,
                                      cleanup='class')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated',
                       FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        LOG.fixture_step(
            "(class) Boot a base pci vm with following nics: {}".format(nics))
        base_vm_pci = \
            vm_helper.boot_vm(name='multiports_pci_base', flavor=flavor_id,
                              nics=nics, cleanup='class')[1]

        LOG.fixture_step("(class) Ping base PCI vm interfaces")
        vm_helper.wait_for_vm_pingable_from_natbox(base_vm_pci)
        vm_helper.ping_vms_from_vm(to_vms=base_vm_pci, from_vm=base_vm_pci,
                                   net_types=['data', 'internal'])

        return base_vm_pci, flavor_id, base_nics, avail_sriov_net, \
            avail_pcipt_net, pcipt_seg_ids, extra_pcipt_net
Example #9
def test_gpu_passthrough(setup_alias):
    """
        Test case for GPU passthrough

    Test Steps:
        - Create pci alias for dev type 'gpu' and 'user'
        - Calculate the initial pf used in 'nova device-list'
        - Create flavor with extra spec with PCI_PASSTHROUGH_ALIAS device gpu & usb
        - Boot a vm with created flavor & gpu passthrough specific centos image
        - Verify the pf used increased after vm launch


    Teardown:
        - Delete created vm, flavor, pci_alias

    """

    nova_gpu_alias, nova_usb_alias = setup_alias

    # initialize parameter for basic operation
    name = 'gpu_passthrough'
    guest_os = 'centos_gpu'
    pf = 1

    LOG.tc_step("Create a flavor for GPU Passthrough")
    flavor_id = nova_helper.create_flavor(name=name, root_disk=16)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    extra_spec = {
        FlavorSpec.PCI_PASSTHROUGH_ALIAS:
        '{}:{},{}:{}'.format(nova_gpu_alias, pf, nova_usb_alias, pf),
        FlavorSpec.CPU_POLICY:
        'dedicated'
    }

    nova_helper.set_flavor(flavor_id, **extra_spec)

    initial_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    initial_usb_pfs_used = _calculate_pf_used(nova_usb_alias)

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, tenant_nic]

    LOG.tc_step("Boot a vm  {} with pci-alias and flavor ".format(
        nova_gpu_alias, flavor_id))
    vm_id = vm_helper.boot_vm(name,
                              flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              nics=nics,
                              cleanup='function')[1]

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    expected_gpu_pfs_used = initial_gpu_pfs_used + pf
    assert actual_gpu_pfs_used == expected_gpu_pfs_used, "actual gpu pci pfs is not equal to expected pci pfs"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    expected_usb_pfs_used = initial_usb_pfs_used + pf
    assert actual_usb_pfs_used == expected_usb_pfs_used, "actual usb pci pfs is not equal to expected pci pfs"

    LOG.tc_step("Delete vm  {} ".format(vm_id))
    vm_helper.delete_vms(vms=vm_id, stop_first=False)

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    assert actual_gpu_pfs_used == initial_gpu_pfs_used, \
        "actual gpu pci pfs is not equal to expected pci pfs after vm delete"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    assert actual_usb_pfs_used == initial_usb_pfs_used, \
        "actual usb pci pfs is not equal to expected pci pfs after vm delete"

    LOG.tc_step(
        "Deleting nova service parameters for gpu & usb")
Example #10
def verify_heat_resource(to_verify=None,
                         template_name=None,
                         stack_name=None,
                         auth_info=None,
                         fail_ok=False):
    """
        Verify the heat resource creation/deletion for given resources

        Args:
            to_verify (list): Resources to verify creation or deletion.
            template_name (str): template to be used to create heat stack.
            stack_name(str): stack name used to create the stack
            auth_info
            fail_ok

        Returns (tuple): (rtn_code (int), msg (str)) - 0 if the resource exists, 1 otherwise

    """
    LOG.info("Verifying heat resource {}".format(to_verify))

    rtn_code = 0
    msg = "Heat resource {} appeared".format(to_verify)
    item_verified = to_verify

    if to_verify == 'volume':
        LOG.info("Verifying volume")
        vol_name = getattr(Heat, template_name)['vol_name']
        resource_found = cinder_helper.get_volumes(name=vol_name)

    elif to_verify == 'ceilometer_alarm':
        resource_found = ceilometer_helper.get_alarms(name=stack_name,
                                                      strict=False)

    elif to_verify == 'neutron_port':
        port_name = getattr(Heat, template_name)['port_name']
        if port_name is None:
            port_name = stack_name
        resource_found = network_helper.get_ports(port_name=port_name)

    elif to_verify == 'neutron_provider_net_range':
        resource_found = network_helper.get_network_segment_ranges(
            field='name', physical_network='sample_physnet_X')

    elif to_verify == 'nova_server_group':
        resource_found = nova_helper.get_server_groups(name=stack_name)

    elif to_verify == 'vm':
        vm_name = getattr(Heat, template_name)['vm_name']
        resource_found = vm_helper.get_vms(vms=vm_name, strict=False)

    elif to_verify == 'nova_flavor':
        resource_found = nova_helper.get_flavors(name='sample-flavor')

    elif to_verify == 'neutron_net':
        resource_found = network_helper.get_tenant_net_id(
            net_name='sample-net')

    elif to_verify == 'image':
        resource_found = glance_helper.get_image_id_from_name(
            name='sample_image')

    elif to_verify == 'subnet':
        resource_found = network_helper.get_subnets(name='sample_subnet')

    elif to_verify == 'floating_ip':
        resource_found = network_helper.get_floating_ips()

    elif to_verify == 'router':
        resource_found = network_helper.get_tenant_router(
            router_name='sample_router', auth_info=auth_info)

    elif to_verify == 'router_gateway':
        item_verified = 'sample_gateway_router'
        resource_found = network_helper.get_tenant_router(
            router_name='sample_gateway_router', auth_info=auth_info)
        if resource_found:
            item_verified = to_verify
            resource_found = network_helper.get_router_ext_gateway_info(
                router_id=resource_found, auth_info=auth_info)

    elif to_verify == 'router_interface':
        item_verified = 'sample_if_router'
        router_id = network_helper.get_tenant_router(
            router_name='sample_if_router', auth_info=auth_info)
        resource_found = router_id
        if resource_found:
            item_verified = 'sample_if_subnet'
            subnets = network_helper.get_subnets(name='sample_if_subnet',
                                                 auth_info=auth_info)
            resource_found = subnets
            if resource_found:
                item_verified = to_verify
                router_subnets = network_helper.get_router_subnets(
                    router=router_id, auth_info=auth_info)
                resource_found = resource_found[0] in router_subnets

    elif to_verify == 'security_group':
        resource_found = network_helper.get_security_groups(
            name='SecurityGroupDeluxe')
    elif to_verify == 'key_pair':
        kp_name = getattr(Heat, template_name)['key_pair_name']
        resource_found = nova_helper.get_keypairs(name=kp_name)
    elif to_verify == 'neutron_qos':
        resource_found = network_helper.get_qos_policies(name='SampleQoS',
                                                         auth_info=auth_info)
    else:
        raise ValueError("Unknown item to verify: {}".format(to_verify))

    if not resource_found:
        msg = "Heat stack {} resource {} does not exist".format(
            stack_name, item_verified)
        if fail_ok:
            rtn_code = 1
        else:
            assert resource_found, msg

    LOG.info(msg)
    return rtn_code, msg
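A hedged usage sketch for verify_heat_resource; the template constant and stack name below are assumptions for illustration, not values from the repo:

# Hypothetical call: check that the volume defined by an assumed Heat template
# constant 'STACK_SAMPLE' exists for a stack named 'sample_stack'.
rtn_code, msg = verify_heat_resource(to_verify='volume',
                                     template_name='STACK_SAMPLE',
                                     stack_name='sample_stack',
                                     fail_ok=True)
assert rtn_code == 0, msg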
Example #11
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = not ('e1000' in vm1_vifs or 'e1000' in vm2_vifs)
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
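_compose_nics is defined elsewhere in the source module. A minimal, hypothetical sketch of such a helper, based only on the nic dictionaries used throughout these examples (the real helper may also use image_id/guest_os to validate which vif models apply):

def _compose_nics(vifs, net_ids, image_id=None, guest_os=None):
    # Hypothetical sketch: pair each requested vif model with a network in order,
    # producing nic dicts in the same {'net-id': ..., 'vif-model': ...} form used above.
    if isinstance(vifs, str):
        vifs = (vifs,)
    nics = []
    for i, vif in enumerate(vifs):
        nic = {'net-id': net_ids[i]}
        if vif != 'virtio':
            nic['vif-model'] = vif
        nics.append(nic)
    return nics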
Example #12
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms East West connection by pinging vms' data network from vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps:
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms' over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its original state

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
Example #13
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test vif model set in image metadata is reflected in vm nics when using normal vnic type.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
Example #14
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using wrl5_avp and wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM using WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports