def _setup_vm(vm_ids, hosts_to_boot):
    """
    Set up source and destination vm
    Args:
        vm_ids: List of already booted VMs
        hosts_to_boot: Boot on same compute if same_host is true or in difference host

    Returns:
        vm_ids: append vm_id created
        source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic
    """

    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    mgmt_nic = {'net-id': mgmt_net_id}
    internal_nic = {'net-id': internal_net_id}
    nics = [mgmt_nic, internal_nic]

    source_vm_id = vm_helper.boot_vm(name='source_vm',
                                     nics=nics,
                                     cleanup='function',
                                     vm_host=hosts_to_boot[0])[1]
    vm_ids.append(source_vm_id)
    dest_vm_id = vm_helper.boot_vm(name='dest_vm',
                                   nics=nics,
                                   cleanup='function',
                                   vm_host=hosts_to_boot[1])[1]
    vm_ids.append(dest_vm_id)
    LOG.info("Source VM {} and Destination VM {} booted".format(
        source_vm_id, dest_vm_id))

    return vm_ids, source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic
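
# Usage sketch (hypothetical, not from the source): how a caller might build
# the hosts_to_boot argument. host_helper.get_up_hypervisors() is used
# elsewhere in this framework; the same_host flag is illustrative.
def _example_hosts_to_boot(same_host):
    hosts = host_helper.get_up_hypervisors()
    if same_host:
        # boot source and destination vms on the same compute
        return [hosts[0], hosts[0]]
    # boot source and destination vms on two different computes
    return hosts[:2]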
def setup_qos(request, avs_required):
    LOG.fixture_step("Creating new QoS")
    scheduler = {'weight': 100}
    qos_new = network_helper.create_qos(scheduler=scheduler,
                                        description="Test QoS",
                                        cleanup='function')[1]
    LOG.fixture_step("Retrieving network ids and Qos'")
    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    qos_internal = network_helper.get_network_values(network=internal_net_id,
                                                     fields='wrs-tm:qos')[0]
    qos_mgmt = network_helper.get_network_values(network=mgmt_net_id,
                                                 fields='wrs-tm:qos')[0]

    def reset():
        LOG.fixture_step("Resetting QoS for tenant and internal networks")

        network_helper.update_net_qos(net_id=internal_net_id,
                                      qos_id=qos_internal)
        network_helper.update_net_qos(net_id=mgmt_net_id, qos_id=qos_mgmt)

        LOG.fixture_step("Deleting created QoS")
        network_helper.delete_qos(qos_new)

    request.addfinalizer(reset)
    return internal_net_id, mgmt_net_id, qos_new
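
# Sketch (assumption, not from the source): setup_qos calls
# request.addfinalizer, so it is meant to run inside a pytest fixture.
# Wrapping it as below registers the reset() teardown automatically; the
# decorator form, fixture name, and scope here are illustrative.
from pytest import fixture

@fixture(scope='function')
def qos_setup(request, avs_required):
    # returns (internal_net_id, mgmt_net_id, qos_new)
    return setup_qos(request, avs_required)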
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
def base_vm():
    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, {'net-id': internal_net_id}, tenant_nic]

    vm_id = vm_helper.boot_vm(name='base_vm', nics=nics, cleanup='module')[1]

    return vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id
def _test_ea_vm_co_existence_with_and_without_crypto_vfs(_flavors):
    """
    Verify that a guest with crypto VFs can co-exist with guests without crypto VFs.
    Args:
        _flavors:

    Returns:

    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    vm_params = {'vm_no_crypto_1': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[0], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_no_crypto_2': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[1], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_sriov_crypto': [_flavors['flavor_qat_vf_1'],
                                     [{'net-id': mgmt_net_id},
                                      {'net-id': tenant_net_ids[2], 'vif-model': vif_type},
                                      {'net-id': internal_net_id, 'vif-model': 'pci-sriov'}]],
                 'vm_crypto_1': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[3], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_crypto_2': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[4], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 }

    vms = {}
    vms_qat_devs = {}

    for vm_name, param in vm_params.items():
        LOG.tc_step("Boot vm {} with {} flavor".format(vm_name, param[0]))
        vm_id = vm_helper.boot_vm('{}'.format(vm_name), flavor=param[0], nics=param[1], cleanup='function')[1]

        LOG.info("Verify  VM can be pinged from NAT box...")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id), "VM is not pingable."
        vms[vm_name] = vm_id
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {} if '_no_crypto' in vm_name else {host_dev_name: 1}
        vms_qat_devs[vm_id] = expt_qat_devs
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)

    _perform_nova_actions(vms, flavors=_flavors, vfs=None)

    for vm_id_, expt_qat_devs_ in vms_qat_devs.items():
        check_helper.check_qat_service(vm_id_, qat_devs=expt_qat_devs_)
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot an ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
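
# Note (assumption): _vms() references module-level constants that are not
# shown in this snippet. Plausible definitions, inferred from how they are
# used above:
GUEST_OS = 'ubuntu_14'   # guest image under test; matches the ubuntu14 log step
VMS_COUNT = 4            # one tenant network needed per vm in vm_vif_models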
def _test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info):
    """
    Verify guest can be launched with multiple crypto VFs, AVP, VIRTIO, and SRIOV interfaces.
    Verify max number of crypto VFs, verify beyond the limit (max is 32) and VM Maintenance
    activity.
    Args:
        vfs:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Launching a VM with flavor flavor_qat_vf_{}".format(vfs))
    vm_name = 'vm_with_{}_vf_pci_device'.format(vfs)
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type},
            {'net-id': internal_net_id, 'vif-model': vif_type}]

    if vfs == 33:
        LOG.tc_step("Verifying VM with over-limit crypto VFs={} cannot be launched".format(vfs))
    else:
        LOG.tc_step("Verifying VM with maximum crypto VFs={}".format(vfs))

    LOG.info("Boot a vm {} with pci-sriov nics, and flavor=flavor_qat_vf_{}".format(vm_name, vfs))
    flavor_id = _flavors['flavor_qat_vf_{}'.format(vfs)]
    rc, vm_id, msg = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function', fail_ok=True)

    if vfs == 33:
        assert rc != 0, "Unexpected: VM launched with over-limit crypto VFs. Details: {}".format(msg)
    else:
        assert rc == 0, "VM was not successfully launched. Details: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {host_dev_name: vfs}
        # With 32 qat-vfs the full check takes more than 1.5 hours, so skip running cpa here
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, run_cpa=False)

        _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors, vfs=vfs)
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, timeout=14400)
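
# Sketch (assumption, not from the source): the vfs values exercised above,
# including the over-limit negative case, would typically come from a pytest
# parametrization along these lines. The parameter list is illustrative; the
# 32-VF maximum and the 33-VF failure case are taken from the test body.
from pytest import mark

@mark.parametrize('vfs', [32, 33])
def test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info):
    _test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info)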
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
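
# Usage sketch (hypothetical): launch a 4-vcpu vhost vm on any host, then a
# 2-vcpu avp vm pinned to a specific compute. The host name is illustrative.
vhost_vm = launch_vm('vhost', num_vcpu=4)
avp_vm = launch_vm('avp', num_vcpu=2, host='compute-0')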
def setup_port_security():

    LOG.fixture_step(
        "Copy userdata files from test server to active controller")
    for i in (1, 2):
        source = "{}/port_security/vm{}-userdata.txt".format(
            TestServerPath.TEST_FILES, i)
        common.scp_from_test_server_to_active_controller(
            source_path=source, dest_dir=StxPath.USERDATA)

    LOG.fixture_step("Enable port security ml2 extension driver on system")
    system_helper.add_ml2_extension_drivers(drivers='port_security')

    LOG.fixture_step("Select neutron networks to test")
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{
        'net-id': network_helper.get_mgmt_net_id()
    }, {
        'net-id': internal_net_id
    }]

    return internal_net_id, nics
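
# Usage sketch (hypothetical test, not from the source): consume the fixture
# and boot a vm with one of the copied userdata files. boot_vm's user_data
# parameter is used elsewhere in this framework; the test name and vm name
# are illustrative.
def test_port_security_sketch(setup_port_security):
    internal_net_id, nics = setup_port_security
    userdata = "{}/vm1-userdata.txt".format(StxPath.USERDATA)
    vm_id = vm_helper.boot_vm('port_security_vm1', nics=nics,
                              user_data=userdata, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)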
def test_boot_vms():

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id
    }]

    for guest_os in ['ubuntu_14', 'cgcs-guest']:
        glance_helper.get_guest_image(guest_os)
        vm_id = vm_helper.boot_vm(guest_os=guest_os, nics=nics)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        time.sleep(30)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_id,
                                   net_types=['mgmt', 'data', 'internal'])
def test_kpi_vm_launch_migrate_rebuild(ixia_required, collect_kpi, hosts_per_backing, boot_from):
    """
    KPI test - vm startup time.
    Args:
        collect_kpi:
        hosts_per_backing:
        boot_from:

    Test Steps:
        - Create a flavor with 2 vcpus, dedicated cpu policy and storage backing (if boot-from-image)
        - Launch a vm from specified boot source
        - Collect the vm startup time via event log

    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled.")

    # vm launch KPI
    if boot_from != 'volume':
        storage_backing = boot_from
        hosts = hosts_per_backing.get(boot_from)
        if not hosts:
            skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(boot_from))

        target_host = hosts[0]
        LOG.tc_step("Clear local storage cache on {}".format(target_host))
        storage_helper.clear_local_storage_cache(host=target_host)

        LOG.tc_step("Create a flavor with 2 vcpus, dedicated cpu policy, and {} storage".format(storage_backing))
        boot_source = 'image'
        flavor = nova_helper.create_flavor(name=boot_from, vcpus=2, storage_backing=storage_backing)[1]
    else:
        target_host = None
        boot_source = 'volume'
        storage_backing = keywords.host_helper.get_storage_backing_with_max_hosts()[0]
        LOG.tc_step("Create a flavor with 2 vcpus, and dedicated cpu policy and {} storage".format(storage_backing))
        flavor = nova_helper.create_flavor(vcpus=2, storage_backing=storage_backing)[1]

    ResourceCleanup.add('flavor', flavor)
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    host_str = ' on {}'.format(target_host) if target_host else ''
    LOG.tc_step("Boot a vm from {}{} and collect vm startup time".format(boot_from, host_str))

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id},
            {'net-id': internal_net_id}]

    vm_id = vm_helper.boot_vm(boot_from, flavor=flavor, source=boot_source, nics=nics, cleanup='function')[1]

    code_boot, out_boot = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VmStartup.NAME.format(boot_from),
                                  log_path=VmStartup.LOG_PATH, end_pattern=VmStartup.END.format(vm_id),
                                  start_pattern=VmStartup.START.format(vm_id), uptime=1)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    # Migration KPI
    if ('ixia_ports' in ProjVar.get_var("LAB")) and (len(hosts_per_backing.get(storage_backing)) >= 2):

        LOG.info("Run migrate tests when more than 2 {} hosts available".format(storage_backing))
        LOG.tc_step("Launch an observer vm")

        mgmt_net_observer = network_helper.get_mgmt_net_id(auth_info=Tenant.get_secondary())
        tenant_net_observer = network_helper.get_tenant_net_id(auth_info=Tenant.get_secondary())
        nics_observer = [{'net-id': mgmt_net_observer},
                         {'net-id': tenant_net_observer},
                         {'net-id': internal_net_id}]
        vm_observer = vm_helper.boot_vm('observer', flavor=flavor, source=boot_source,
                                        nics=nics_observer, cleanup='function', auth_info=Tenant.get_secondary())[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm_observer)
        vm_helper.setup_kernel_routing(vm_observer)
        vm_helper.setup_kernel_routing(vm_id)
        vm_helper.route_vm_pair(vm_observer, vm_id)

        if 'local_lvm' != boot_from:
            # live migration unsupported for boot-from-image vm with local_lvm storage
            LOG.tc_step("Collect live migrate KPI for vm booted from {}".format(boot_from))

            def operation_live(vm_id_):
                code, msg = vm_helper.live_migrate_vm(vm_id=vm_id_)
                assert 0 == code, msg
                vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
                # kernel routing
                vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

            time.sleep(30)
            duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_live, vm_id)
            assert duration > 0, "No traffic loss detected during live migration for {} vm".format(boot_from)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NAME.format(boot_from),
                                      kpi_val=duration, uptime=1, unit='Time(ms)')

            vim_duration = vm_helper.get_live_migrate_duration(vm_id=vm_id)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NOVA_NAME.format(boot_from),
                                      kpi_val=vim_duration, uptime=1, unit='Time(s)')

        LOG.tc_step("Collect cold migrate KPI for vm booted from {}".format(boot_from))

        def operation_cold(vm_id_):
            code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

        time.sleep(30)
        duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_cold, vm_id)
        assert duration > 0, "No traffic loss detected during cold migration for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

        vim_duration = vm_helper.get_cold_migrate_duration(vm_id=vm_id)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NOVA_NAME.format(boot_from),
                                  kpi_val=vim_duration, uptime=1, unit='Time(s)')

    # Rebuild KPI
    if 'volume' != boot_from:
        LOG.info("Run rebuild test for vm booted from image")

        def operation_rebuild(vm_id_):
            code, msg = vm_helper.rebuild_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_vms_from_vm(vm_id, vm_id, net_types=('data', 'internal'))

        LOG.tc_step("Collect vm rebuild KPI for vm booted from {}".format(boot_from))
        time.sleep(30)
        duration = vm_helper.get_ping_loss_duration_on_operation(vm_id, 300, 0.5, operation_rebuild, vm_id)
        assert duration > 0, "No ping loss detected during rebuild for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=Rebuild.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

    # Check the vm boot result at the end after collecting other KPIs
    assert code_boot == 0, out_boot
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
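
# Sketch (hypothetical): _compose_nics is not shown in this snippet. Judging
# from its call site, it pairs each requested vif model with a network id;
# a simplified stand-in could look like this (image_id and guest_os, which
# presumably drive model-specific handling such as e1000, are ignored here).
def _compose_nics_sketch(vifs, net_ids, image_id, guest_os):
    nics = []
    for vif_model, net_id in zip(vifs, net_ids):
        nic = {'net-id': net_id}
        if vif_model != 'virtio':
            # non-default models must be requested explicitly
            nic['vif-model'] = vif_model
        nics.append(nic)
    return nics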
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms' east-west connectivity by pinging the vms' data network addresses from one vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over the data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its pre-test distributed setting

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test that the vif model set in image metadata is reflected in vm nics when using the normal vnic type.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that a cinder volume can be attached to a VM created using the wrl5_avp or wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM using WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # Attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports