def _test_ea_vm_co_existence_with_and_without_crypto_vfs(_flavors):
    """
    Verify guests with crypto VFs can co-exist with guests without crypto VFs.
    Args:
        _flavors:

    Returns:

    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    vm_params = {'vm_no_crypto_1': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[0], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_no_crypto_2': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[1], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_sriov_crypto': [_flavors['flavor_qat_vf_1'],
                                     [{'net-id': mgmt_net_id},
                                      {'net-id': tenant_net_ids[2], 'vif-model': vif_type},
                                      {'net-id': internal_net_id, 'vif-model': 'pci-sriov'}]],
                 'vm_crypto_1': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[3], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_crypto_2': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[4], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 }

    vms = {}
    vms_qat_devs = {}

    for vm_name, param in vm_params.items():
        LOG.tc_step("Boot vm {} with {} flavor".format(vm_name, param[0]))
        vm_id = vm_helper.boot_vm('{}'.format(vm_name), flavor=param[0], nics=param[1], cleanup='function')[1]

        LOG.info("Verify  VM can be pinged from NAT box...")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id), "VM is not pingable."
        vms[vm_name] = vm_id
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
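        # VMs named with '_no_crypto' are not expected to see any QAT VF devices; each
        # crypto VM (flavor_qat_vf_1) should see exactly one VF of the host's QAT VF device.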
        expt_qat_devs = {} if '_no_crypto' in vm_name else {host_dev_name: 1}
        vms_qat_devs[vm_id] = expt_qat_devs
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)

    _perform_nova_actions(vms, flavors=_flavors, vfs=None)

    for vm_id_, expt_qat_devs_ in vms_qat_devs.items():
        check_helper.check_qat_service(vm_id_, qat_devs=expt_qat_devs_)
def get_pci_vm_nics(vif_model, pci_net_id, other_pci_net_id=None):
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': pci_net_id,
        'vif-model': vif_model
    }]
    if other_pci_net_id:
        nics.append({'net-id': other_pci_net_id, 'vif-model': vif_model})

    return nics
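# Usage sketch (net IDs are hypothetical), showing the nic list this helper builds for a
# VM with one or two PCI networks on top of the mgmt network:
#   nics = get_pci_vm_nics('pci-sriov', pci_net_id='<pci-net-uuid>')
#   # -> [{'net-id': <mgmt-net-id>},
#   #     {'net-id': '<pci-net-uuid>', 'vif-model': 'pci-sriov'}]
#   vm_helper.boot_vm('sriov_vm', nics=nics, cleanup='function')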
def base_vm():
    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, {'net-id': internal_net_id}, {'net-id': tenant_net_id}]

    vm_id = vm_helper.boot_vm(name='base_vm', nics=nics, cleanup='module')[1]

    return vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id
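# Typical consumption of the fixture above (illustrative): tests unpack the returned tuple
# to reuse the base VM, its nics and network ids when booting VMs under test, e.g.
#   base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm()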
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
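# The fixture above returns four VM ids, one per entry in vm_vif_models: two virtio VMs plus
# two avp (or virtio, when AVS is not in use) VMs, each attached to the mgmt, tenant and
# internal networks, so tests can ping every VM over those three network types.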
def _test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info):
    """
    Verify a guest can be launched with multiple crypto VFs, AVP, VIRTIO, and SRIOV interfaces.
    Verify the maximum number of crypto VFs, behaviour beyond the limit (max is 32), and VM
    maintenance activity.
    Args:
        vfs:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Launching a VM with flavor flavor_qat_vf_{}".format(vfs))
    vm_name = 'vm_with_{}_vf_pci_device'.format(vfs)
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type},
            {'net-id': internal_net_id, 'vif-model': vif_type}]

    if vfs == 33:
        LOG.tc_step("Verifying  VM with over limit crypto VFs={} can not be launched .....".format(vfs))
    else:
        LOG.tc_step("Verifying  VM with maximum crypto VFs={} .....".format(vfs))

    LOG.info("Boot a vm {} with pci-sriov nics, and flavor=flavor_qat_vf_{}".format(vm_name, vfs))
    flavor_id = _flavors['flavor_qat_vf_{}'.format(vfs)]
    rc, vm_id, msg = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function', fail_ok=True)

    if vfs == 33:
        assert rc != 0, "Unexpectedly launched VM with over-limit crypto VFs: {}".format(msg)
    else:
        assert rc == 0, "VM is not successfully launched. Details: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {host_dev_name: vfs}
        # 32 qat-vfs takes more than 1.5 hours to run tests
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, run_cpa=False)

        _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors, vfs=vfs)
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, timeout=14400)
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
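# Usage sketch (host name is hypothetical): boot a 4-vcpu vhost VM pinned to a specific
# compute, then reuse the returned id for traffic tests.
#   vm_id = launch_vm('vhost', num_vcpu=4, host='compute-1')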
def is_vm_filesystem_rw(vm_id, rootfs='vda', vm_image_name=None):
    """

    Args:
        vm_id:
        rootfs (str|list):
        vm_image_name (None|str):

    Returns:

    """
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=240)

    if vm_image_name is None:
        vm_image_name = GuestImages.DEFAULT['guest']

    router_host = dhcp_host = None
    try:
        LOG.info(
            "---------Collecting router and dhcp agent host info-----------")
        router_host = network_helper.get_router_host()
        mgmt_net = network_helper.get_mgmt_net_id()
        dhcp_host = network_helper.get_network_agents(field='Host',
                                                      network=mgmt_net)

        if isinstance(rootfs, str):
            rootfs = [rootfs]
        for fs in rootfs:
            vm_helper.mount_attached_volume(vm_id=vm_id, rootfs=fs)
        with vm_helper.ssh_to_vm_from_natbox(vm_id,
                                             vm_image_name=vm_image_name,
                                             retry_timeout=300) as vm_ssh:
            for fs in rootfs:
                cmd = "mount | grep {} | grep rw | wc -l".format(fs)
                cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1]
                if cmd_output != '1':
                    LOG.info("Filesystem /dev/{} is not rw for VM: {}".format(
                        fs, vm_id))
                    return False
            return True
    except exceptions.SSHRetryTimeout:
        LOG.error("Failed to ssh, collecting vm console log.")
        vm_helper.get_console_logs(vm_ids=vm_id)
        LOG.info("Router host: {}. dhcp agent host: {}".format(
            router_host, dhcp_host))
        raise
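# Usage sketch (rootfs names are illustrative): after attaching and mounting extra volumes,
# verify each filesystem is mounted read-write.
#   assert is_vm_filesystem_rw(vm_id, rootfs=['vdb', 'vdc']), "Attached volume(s) not rw"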
def get_heat_params(param_name=None):
    """
    Generate parameters for heat based on keywords

    Args:
        param_name (str): template to be used to create heat stack.

    Returns (str): return None if failure or the val for the given param

    """
    if param_name == 'NETWORK':
        net_id = network_helper.get_mgmt_net_id()
        return network_helper.get_net_name_from_id(net_id=net_id)
    elif param_name == 'FLAVOR':
        return 'small_ded'
    elif param_name == 'IMAGE':
        return GuestImages.DEFAULT['guest']
    else:
        return None
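# Usage sketch: resolve heat template parameters by keyword; unknown keywords return None.
#   params = {name: get_heat_params(name) for name in ('NETWORK', 'FLAVOR', 'IMAGE')}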
def setup_port_security():

    LOG.fixture_step(
        "Copy userdata files from test server to active controller")
    for i in (1, 2):
        source = "{}/port_security/vm{}-userdata.txt".format(
            TestServerPath.TEST_FILES, i)
        common.scp_from_test_server_to_active_controller(
            source_path=source, dest_dir=StxPath.USERDATA)

    LOG.fixture_step("Enable port security ml2 extension driver on system")
    system_helper.add_ml2_extension_drivers(drivers='port_security')

    LOG.fixture_step("Select neutron networks to test")
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{
        'net-id': network_helper.get_mgmt_net_id()
    }, {
        'net-id': internal_net_id
    }]

    return internal_net_id, nics
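# Typical consumption of the fixture above: tests unpack the internal net id and nic list,
# then boot VMs with the copied userdata files to exercise port security, e.g.
#   internal_net_id, nics = setup_port_security()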
def test_boot_vms():

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id
    }]

    for guest_os in ['ubuntu_14', 'cgcs-guest']:
        glance_helper.get_guest_image(guest_os)
        vm_id = vm_helper.boot_vm(guest_os=guest_os, nics=nics)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        time.sleep(30)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_id,
                                   net_types=['mgmt', 'data', 'internal'])
def __launch_vm_scale_stack():
    stack_name = VM_SCALE_STACK
    template_name = '{}.yaml'.format(stack_name)
    image = GuestImages.DEFAULT['guest']
    high_val = 50
    template_path = os.path.join(ProjVar.get_var('USER_FILE_DIR'),
                                 HEAT_SCENARIO_PATH, template_name)
    key_pair = vm_helper.get_default_keypair()
    net_id = network_helper.get_mgmt_net_id()
    network = network_helper.get_net_name_from_id(net_id=net_id)

    flavor_id = nova_helper.create_flavor(name='heartbeat',
                                          cleanup='module')[1]
    extra_specs = {FlavorSpec.GUEST_HEARTBEAT: 'True'}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    params = {
        'FLAVOR': flavor_id,
        'KEYPAIR': key_pair,
        'IMAGE': image,
        'NETWORK': network,
        'HIGH_VALUE': high_val
    }
    LOG.fixture_step(
        "Create Heat Stack for auto scalable vms using template {}".format(
            template_name))
    heat_helper.create_stack(stack_name=stack_name,
                             template=template_path,
                             parameters=params,
                             fail_ok=False,
                             cleanup='module')

    LOG.fixture_step(
        "Verify first VM is created via heat stack for auto scaling")
    vm_id = vm_helper.get_vm_id_from_name(vm_name=stack_name, strict=False)

    return stack_name, vm_id
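# The helper above returns the stack name and the id of the first VM created by the stack:
#   stack_name, vm_id = __launch_vm_scale_stack()
# Scaling tests can then drive load against vm_id and watch additional instances being created.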
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 coleto creek pci device on system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # Number of VMs to launch to max out the total configured device VFs. Each VM is
    # launched with 4 VFs; 4 VFs on each compute are reserved for the resize nova action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)
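    # e.g. with 2 crypto hosts and vf_count=32: (32 - 4 * 2) / 4 = 6 VMs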

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name=vm_name, nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # vm_host = nova_helper.get_vm_host(vm_id_)
        # total, used = network_helper.get_pci_device_vfs_counts_for_host(vm_host, vf_device_id)[0]
        # if (total - int(used)) >= 4:
        #     expt_res = 0

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # else:
        #     expt_res = 1
        #     LOG.info("Resizing of vm {} skipped; host {} max out vfs; used vfs = {}".format(vm_name_, vm_host, used))

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using wrl5_avp and wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM use WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # boot a cinder volume and attached it to vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test vif model set in image metadata is reflected in vm nics when use normal vnic type.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out a internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are occupied on separate hosts achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to a unknown IP and check compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate is not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure VMs are not on the same compute (not strictly needed, but double checking):
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms East West connection by pinging vms' data network from vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms' over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its original state

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
def test_kpi_vm_launch_migrate_rebuild(ixia_required, collect_kpi, hosts_per_backing, boot_from):
    """
    KPI test  - vm startup time.
    Args:
        collect_kpi:
        hosts_per_backing
        boot_from

    Test Steps:
        - Create a flavor with 2 vcpus, dedicated cpu policy and storage backing (if boot-from-image)
        - Launch a vm from specified boot source
        - Collect the vm startup time via event log

    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled.")

    # vm launch KPI
    if boot_from != 'volume':
        storage_backing = boot_from
        hosts = hosts_per_backing.get(boot_from)
        if not hosts:
            skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(boot_from))

        target_host = hosts[0]
        LOG.tc_step("Clear local storage cache on {}".format(target_host))
        storage_helper.clear_local_storage_cache(host=target_host)

        LOG.tc_step("Create a flavor with 2 vcpus, dedicated cpu policy, and {} storage".format(storage_backing))
        boot_source = 'image'
        flavor = nova_helper.create_flavor(name=boot_from, vcpus=2, storage_backing=storage_backing)[1]
    else:
        target_host = None
        boot_source = 'volume'
        storage_backing = keywords.host_helper.get_storage_backing_with_max_hosts()[0]
        LOG.tc_step("Create a flavor with 2 vcpus, and dedicated cpu policy and {} storage".format(storage_backing))
        flavor = nova_helper.create_flavor(vcpus=2, storage_backing=storage_backing)[1]

    ResourceCleanup.add('flavor', flavor)
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    host_str = ' on {}'.format(target_host) if target_host else ''
    LOG.tc_step("Boot a vm from {}{} and collect vm startup time".format(boot_from, host_str))

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id},
            {'net-id': internal_net_id}]

    vm_id = vm_helper.boot_vm(boot_from, flavor=flavor, source=boot_source, nics=nics, cleanup='function')[1]

    code_boot, out_boot = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VmStartup.NAME.format(boot_from),
                                  log_path=VmStartup.LOG_PATH, end_pattern=VmStartup.END.format(vm_id),
                                  start_pattern=VmStartup.START.format(vm_id), uptime=1)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    # Migration KPI
    if ('ixia_ports' in ProjVar.get_var("LAB")) and (len(hosts_per_backing.get(storage_backing)) >= 2):

        LOG.info("Run migrate tests when more than 2 {} hosts available".format(storage_backing))
        LOG.tc_step("Launch an observer vm")

        mgmt_net_observer = network_helper.get_mgmt_net_id(auth_info=Tenant.get_secondary())
        tenant_net_observer = network_helper.get_tenant_net_id(auth_info=Tenant.get_secondary())
        nics_observer = [{'net-id': mgmt_net_observer},
                         {'net-id': tenant_net_observer},
                         {'net-id': internal_net_id}]
        vm_observer = vm_helper.boot_vm('observer', flavor=flavor, source=boot_source,
                                        nics=nics_observer, cleanup='function', auth_info=Tenant.get_secondary())[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm_observer)
        vm_helper.setup_kernel_routing(vm_observer)
        vm_helper.setup_kernel_routing(vm_id)
        vm_helper.route_vm_pair(vm_observer, vm_id)

        if 'local_lvm' != boot_from:
            # live migration unsupported for boot-from-image vm with local_lvm storage
            LOG.tc_step("Collect live migrate KPI for vm booted from {}".format(boot_from))

            def operation_live(vm_id_):
                code, msg = vm_helper.live_migrate_vm(vm_id=vm_id_)
                assert 0 == code, msg
                vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
                # kernel routing
                vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

            time.sleep(30)
            duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_live, vm_id)
            assert duration > 0, "No traffic loss detected during live migration for {} vm".format(boot_from)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NAME.format(boot_from),
                                      kpi_val=duration, uptime=1, unit='Time(ms)')

            vim_duration = vm_helper.get_live_migrate_duration(vm_id=vm_id)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NOVA_NAME.format(boot_from),
                                      kpi_val=vim_duration, uptime=1, unit='Time(s)')

        LOG.tc_step("Collect cold migrate KPI for vm booted from {}".format(boot_from))

        def operation_cold(vm_id_):
            code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

        time.sleep(30)
        duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_cold, vm_id)
        assert duration > 0, "No traffic loss detected during cold migration for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

        vim_duration = vm_helper.get_cold_migrate_duration(vm_id=vm_id)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NOVA_NAME.format(boot_from),
                                  kpi_val=vim_duration, uptime=1, unit='Time(s)')

    # Rebuild KPI
    if 'volume' != boot_from:
        LOG.info("Run rebuild test for vm booted from image")

        def operation_rebuild(vm_id_):
            code, msg = vm_helper.rebuild_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_vms_from_vm(vm_id, vm_id, net_types=('data', 'internal'))

        LOG.tc_step("Collect vm rebuild KPI for vm booted from {}".format(boot_from))
        time.sleep(30)
        duration = vm_helper.get_ping_loss_duration_on_operation(vm_id, 300, 0.5, operation_rebuild, vm_id)
        assert duration > 0, "No ping loss detected during rebuild for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=Rebuild.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

    # Check the vm boot result at the end after collecting other KPIs
    assert code_boot == 0, out_boot
    def base_setup_pci(self):
        LOG.fixture_step(
            "(class) Get an internal network that supports both pci-sriov and "
            "pcipt vif to boot vm")
        avail_pcipt_nets, is_cx4 = network_helper.get_pci_vm_network(
            pci_type='pci-passthrough',
            net_name='internal0-net', rtn_all=True)
        avail_sriov_nets, _ = network_helper.get_pci_vm_network(
            pci_type='pci-sriov',
            net_name='internal0-net', rtn_all=True)

        if not avail_pcipt_nets and not avail_sriov_nets:
            skip(SkipHostIf.PCI_IF_UNAVAIL)

        avail_nets = list(set(avail_pcipt_nets) & set(avail_sriov_nets))
        extra_pcipt_net = avail_pcipt_net = avail_sriov_net = None
        pcipt_seg_ids = {}
        if avail_nets:
            avail_net_name = avail_nets[-1]
            avail_net, segment_id = network_helper.get_network_values(
                network=avail_net_name,
                fields=('id', 'provider:segmentation_id'))
            internal_nets = [avail_net]
            pcipt_seg_ids[avail_net_name] = segment_id
            avail_pcipt_net = avail_sriov_net = avail_net
            LOG.info(
                "Internal network(s) selected for pcipt and sriov: {}".format(
                    avail_net_name))
        else:
            LOG.info("No internal network support both sriov and pcipt")
            internal_nets = []
            if avail_pcipt_nets:
                avail_pcipt_net_name = avail_pcipt_nets[-1]
                avail_pcipt_net, segment_id = network_helper.get_network_values(
                    network=avail_pcipt_net_name,
                    fields=('id', 'provider:segmentation_id'))
                internal_nets.append(avail_pcipt_net)
                pcipt_seg_ids[avail_pcipt_net_name] = segment_id
                LOG.info("pci-passthrough net: {}".format(avail_pcipt_net_name))
            if avail_sriov_nets:
                avail_sriov_net_name = avail_sriov_nets[-1]
                avail_sriov_net = network_helper.get_net_id_from_name(
                    avail_sriov_net_name)
                internal_nets.append(avail_sriov_net)
                LOG.info("pci-sriov net: {}".format(avail_sriov_net_name))

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        base_nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}]
        nics = base_nics + [{'net-id': net_id} for net_id in internal_nets]

        if avail_pcipt_nets and is_cx4:
            extra_pcipt_net_name = avail_nets[0] if avail_nets else \
                avail_pcipt_nets[0]
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            if extra_pcipt_net not in internal_nets:
                nics.append({'net-id': extra_pcipt_net})
                pcipt_seg_ids[extra_pcipt_net_name] = seg_id

        LOG.fixture_step("(class) Create a flavor with dedicated cpu policy.")
        flavor_id = \
            nova_helper.create_flavor(name='dedicated', vcpus=2, ram=2048,
                                      cleanup='class')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated',
                       FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        LOG.fixture_step(
            "(class) Boot a base pci vm with following nics: {}".format(nics))
        base_vm_pci = \
            vm_helper.boot_vm(name='multiports_pci_base', flavor=flavor_id,
                              nics=nics, cleanup='class')[1]

        LOG.fixture_step("(class) Ping base PCI vm interfaces")
        vm_helper.wait_for_vm_pingable_from_natbox(base_vm_pci)
        vm_helper.ping_vms_from_vm(to_vms=base_vm_pci, from_vm=base_vm_pci,
                                   net_types=['data', 'internal'])

        return base_vm_pci, flavor_id, base_nics, avail_sriov_net, \
            avail_pcipt_net, pcipt_seg_ids, extra_pcipt_net
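    # Typical consumption of the fixture method above (illustrative): tests unpack the tuple
    # so they can boot pci-sriov / pci-passthrough VMs against the same base VM and networks:
    #   base_vm_pci, flavor, base_nics, sriov_net, pcipt_net, pcipt_seg_ids, extra_pcipt_net = \
    #       self.base_setup_pci()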
def test_gpu_passthrough(setup_alias):
    """
        Test case for GPU passthrough

    Test Steps:
        - Create pci alias for dev type 'gpu' and 'user'
        - Calculate the initial pf used in 'nova device-list'
        - Create flavor with extra spec with PCI_PASSTHROUGH_ALIAS device gpu & usb
        - Boot a vm with created flavor & gpu passthrough specific centos image
        - Verify the pf used increased after vm launch


    Teardown:
        - Delete created vm, flavor, pci_alias

    """

    nova_gpu_alias, nova_usb_alias = setup_alias

    # initialize parameter for basic operation
    name = 'gpu_passthrough'
    guest_os = 'centos_gpu'
    pf = 1

    LOG.tc_step("Create a flavor for GPU Passthrough")
    flavor_id = nova_helper.create_flavor(name=name, root_disk=16)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    extra_spec = {
        FlavorSpec.PCI_PASSTHROUGH_ALIAS:
        '{}:{},{}:{}'.format(nova_gpu_alias, pf, nova_usb_alias, pf),
        FlavorSpec.CPU_POLICY:
        'dedicated'
    }

    nova_helper.set_flavor(flavor_id, **extra_spec)

    initial_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    initial_usb_pfs_used = _calculate_pf_used(nova_usb_alias)

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, tenant_nic]

    LOG.tc_step("Boot a vm  {} with pci-alias and flavor ".format(
        nova_gpu_alias, flavor_id))
    vm_id = vm_helper.boot_vm(name,
                              flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              nics=nics,
                              cleanup='function')[1]

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    expected_gpu_pfs_used = initial_gpu_pfs_used + pf
    assert actual_gpu_pfs_used == expected_gpu_pfs_used, "actual gpu pci pfs is not equal to expected pci pfs"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    expected_usb_pfs_used = initial_usb_pfs_used + pf
    assert actual_usb_pfs_used == expected_usb_pfs_used, "actual usb pci pfs is not equal to expected pci pfs"

    LOG.tc_step("Delete vm  {} ".format(vm_id))
    vm_helper.delete_vms(vms=vm_id, stop_first=False)

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    assert actual_gpu_pfs_used == initial_gpu_pfs_used, \
        "actual gpu pci pfs is not equal to expected pci pfs after vm delete"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    assert actual_usb_pfs_used == initial_usb_pfs_used, \
        "actual usb pci pfs is not equal to expected pci pfs after vm delete"

    LOG.tc_step(
        "Deleting nova service parameters for gpu & usb")
def test_port_trunking():
    """
    Port trunking feature test cases

    Test Steps:
        - Create networks
        - Create subnets
        - Create a parent port and subports
        - Create a trunk with parent port and subports
        - Boot the first vm with the trunk
        - Create the second trunk without subport
        - Boot the second vm
        - Add subports to the second trunk
        - Configure vlan interfaces inside guests
        - Verify connectivity via vlan interfaces
        - Remove the subport from trunk and verify connectivity
        - Add the subport back to trunk and verify connectivity
        - Do vm actions and verify connectivity


    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    vif_model = 'avp' if system_helper.is_avs() else None
    network_names = ['network11', 'network12', 'network13']
    net_ids = []
    sub_nets = ["30.0.0.0/24", "30.0.1.0/24", "30.0.2.0/24"]
    subnet_ids = []
    # parent ports and sub ports for trunk 1 and trunk 2
    trunk1_parent_port = 'vrf10'
    trunk1_subport_1 = 'vrf11'
    trunk1_subport_2 = 'vrf12'

    trunk2_parent_port = 'host10'
    trunk2_subport_1 = 'host11'
    trunk2_subport_2 = 'host12'

    # vlan id for the subports
    segment_1 = 1
    segment_2 = 2

    LOG.tc_step("Create Networks to be used by trunk")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    # Create Trunks
    LOG.tc_step("Create Parent port for trunk 1")
    t1_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk1_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t1_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk1_parent_port)[0]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk1_subport_1,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk1_subport_2,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t1_sub_ports = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t1_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 1")
    trunk1_id = network_helper.create_trunk(t1_parent_port_id,
                                            name='trunk-1',
                                            sub_ports=t1_sub_ports,
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}, {'port-id': t1_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm_id = vm_helper.boot_vm(name='vm-with-trunk1-port',
                              nics=nics,
                              cleanup='function')[1]
    LOG.tc_step("Setup vlan interfaces inside guest")
    _bring_up_vlan_interface(vm_id, 'eth1', [segment_1])

    # Create second trunk port without the subports and vm
    LOG.tc_step("Create Parent port for trunk 2")
    t2_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk2_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t2_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk2_parent_port)[0]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk2_subport_1,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk2_subport_2,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t2_sub_ports = [{
        'port': t2_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t2_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 2")
    trunk2_id = network_helper.create_trunk(t2_parent_port_id,
                                            name='trunk-2',
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics_2 = [{'net-id': mgmt_net_id}, {'port-id': t2_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm2_id = vm_helper.boot_vm(name='vm-with-trunk2-port',
                               nics=nics_2,
                               cleanup='function')[1]

    LOG.tc_step("Add the sub ports to the second truck")
    network_helper.set_trunk(trunk2_id, sub_ports=t2_sub_ports)

    LOG.tc_step("Setup VLAN interfaces inside guest")
    _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

    # ping b/w 2 vms using the vlan interfaces
    eth_name = 'eth1.1'

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = network_helper.get_ip_for_eth(eth_name=eth_name,
                                                ssh_client=vm_ssh)

    if ip_addr:
        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step("Ping on vlan interface from guest")
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

    # unset the subport on trunk_1 and try the ping (it will fail)
    LOG.tc_step(
        "Removing a subport from trunk and ping on vlan interface inside guest"
    )
    ret_code_10 = network_helper.unset_trunk(trunk1_id,
                                             sub_ports=[t1_sub_port1_id])[0]
    assert ret_code_10 == 0, "Subports not removed as expected."

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        ping = network_helper.ping_server(ip_addr,
                                          ssh_client=vm2_ssh,
                                          num_pings=20,
                                          fail_ok=True)[0]
        assert ping == 100, "Ping did not fail as expected."

    # set the subport on trunk_1 and try the ping (it will work)
    LOG.tc_step(
        "Add the subport back to trunk and ping on vlan interface inside guest"
    )
    t1_sub_port = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }]
    network_helper.set_trunk(trunk1_id, sub_ports=t1_sub_port)

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    # VM operation and ping
    for vm_actions in [['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm2_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm2_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step(
            "Verify ping from vm_under_test to base_vm over management network still works "
            "after {}".format(vm_actions))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm2_id,
                                   net_types=['mgmt'])

        if vm_actions[0] == 'cold_migrate':
            LOG.tc_step("Setup VLAN interfaces inside guest")
            _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])
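            # The VLAN sub-interface has to be re-created here because a cold
            # migration reboots the guest, so the runtime-only interface
            # configuration is lost before the VLAN ping below.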

        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step(
                "Ping on vlan interface from guest after action {}".format(
                    vm_actions))
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

        vm_host = vm_helper.get_vm_host(vm2_id)

        vm_on_target_host = vm_helper.get_vms_on_host(vm_host)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm2_id, ping_vms=True)

    for vm_id_on_target_host in vm_on_target_host:
        LOG.tc_step("Setup VLAN interfaces inside guest")
        _bring_up_vlan_interface(vm_id_on_target_host, 'eth1', [segment_1])

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest after evacuation")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    LOG.tc_step(
        "Attempt to delete trunk when in use, expect pass for AVS only")
    code = network_helper.delete_trunks(trunks=trunk1_id, fail_ok=True)[0]

    if system_helper.is_avs():
        assert 0 == code, "Failed to delete trunk when it's in use by a running VM with AVS"
    else:
        assert 1 == code, "Trunk was deleted while in use by a running VM with OVS"
Example #21
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = 'e1000' not in vm1_vifs and 'e1000' not in vm2_vifs
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})
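    # FlavorSpec.CPU_POLICY presumably maps to the "hw:cpu_policy" extra spec; the
    # equivalent OpenStack CLI would be roughly:
    #   openstack flavor set --property hw:cpu_policy=dedicated <flavor-id>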

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
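        # net_types is trimmed to match however many nics were composed, so a vm
        # booted with fewer vifs is only pinged over the networks it is attached to.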
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
Example #22
def test_ipv6_subnet(vif_model, check_avs_pattern):
    """
    Ipv6 Subnet feature test cases

    Test Steps:
        - Create networks
        - Create Ipv6 enabled subnet
        - Boot the first vm with the ipv6 subnet
        - Boot the second vm with ipv6 subnet
        - Configure interfaces to get ipv6 addr
        - Verify connectivity ipv6 interfaces
        - Ping default router

    Test Teardown:
        - Delete vms, subnets, and networks created

    """
    network_names = ['network11']
    net_ids = []
    sub_nets = ["fd00:0:0:21::/64"]
    gateway_ipv6 = "fd00:0:0:21::1"
    subnet_ids = []

    dns_server = "2001:4860:4860::8888"

    LOG.tc_step("Create Networks to setup IPV6 subnet")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create IPV6 Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         ip_version=6,
                                         dns_servers=dns_server,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])
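    # For reference only, an equivalent IPv6 subnet created through the OpenStack CLI
    # would look roughly like (the subnet name is illustrative):
    #   openstack subnet create --network <net-id> --ip-version 6 \
    #       --subnet-range fd00:0:0:21::/64 --dns-nameserver 2001:4860:4860::8888 \
    #       --gateway none ipv6-subnet-1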

    LOG.tc_step("Boot a VM with mgmt net and Network with IPV6 subnet")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': net_ids[0],
        'vif-model': vif_model
    }]

    image = None
    if vif_model == 'e1000':
        image = glance_helper.create_image(name=vif_model,
                                           hw_vif_model=vif_model,
                                           cleanup='function')[1]

    LOG.tc_step("Boot a vm with created nets")
    vm_id = vm_helper.boot_vm(name='vm-with-ipv6-nic',
                              nics=nics,
                              image_id=image,
                              cleanup='function')[1]
    LOG.tc_step("Setup interface script inside guest and restart network")
    _bring_up_interface(vm_id)
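    # _bring_up_interface is a helper presumably defined earlier in this module; for an
    # IPv6 subnet this typically means configuring eth1 to pick up an address, e.g.
    # (sketch only, not this helper's actual implementation):
    #   ip link set dev eth1 up
    #   dhclient -6 eth1               # or SLAAC / a static "ip -6 addr add"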

    LOG.tc_step("Boot a second vm with created nets")
    vm_id2 = vm_helper.boot_vm(name='vm2-with-ipv6-nic',
                               nics=nics,
                               cleanup='function')[1]
    LOG.tc_step("Setup interface script inside guest and restart network")
    _bring_up_interface(vm_id2)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = _get_ipv6_for_eth(eth_name='eth1', ssh_client=vm_ssh)

        assert ip_addr, "Failed to assign IPv6 address to eth1"
        LOG.info("Got IPv6 address: {}".format(ip_addr))

    with vm_helper.ssh_to_vm_from_natbox(vm_id2) as vm_ssh:
        LOG.tc_step("ping b/w vms on the ipv6 net")
        ping = _ping6_vms(ssh_client=vm_ssh, ipv6_addr=ip_addr)
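        # _ping6_vms presumably wraps "ping6 -c <count> <addr>" run inside the guest
        # and returns its exit code (an assumption), so 0 means the peer replied.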
        assert ping == 0, "Ping between VMs failed"
        LOG.tc_step("ping Default Gateway from vms on the ipv6 net")
        ping = _ping6_vms(ssh_client=vm_ssh, ipv6_addr=gateway_ipv6)
        assert ping == 0, "Ping to default router failed"