Example #1
def check_avs_pattern():
    """
    Skip the test on an OVS system if the test name contains an AVS-specific pattern.
    """
    test_name = ProjVar.get_var('TEST_NAME')
    avs_pattern = 'avp|avr|avs|dpdk|e1000'
    if re.search(avs_pattern, test_name) and not system_helper.is_avs():
        skip("Test unsupported by OVS")
Example #2
def update_net_quota(request):
    if not system_helper.is_avs():
        skip('Feature only supported by AVS')

    network_quota = vm_helper.get_quotas('networks')[0]
    vm_helper.set_quotas(networks=network_quota + 6)

    def _revert_quota():
        vm_helper.set_quotas(networks=network_quota)

    request.addfinalizer(_revert_quota)
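The quota bump above is consumed simply by listing the fixture as a test argument; the finalizer restores the original quota afterwards. A hypothetical consumer, assuming update_net_quota is registered as a pytest fixture:

def test_create_extra_networks(update_net_quota):
    # The fixture raised the network quota by 6; create a few extra
    # networks within that headroom. Names here are illustrative only.
    for i in range(3):
        network_helper.create_network(name='extra-net-{}'.format(i),
                                      cleanup='function')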
Example #3
def test_vswitch_ports_cores_mapping():
    if not system_helper.is_avs():
        skip("vshell commands unsupported by OVS")

    up_hypervisors = host_helper.get_hypervisors(state='up')
    assert up_hypervisors, "No hypervisor is up."

    for host in up_hypervisors:
        LOG.tc_step("Find out expected port-engine mapping for {} via vshell port/engine-list".format(host))

        check_helper.check_host_vswitch_port_engine_map(host)
Example #4
def test_ceilometer_meters_exist(meters):
    """
    Validate ceilometer meters exist
    Verification Steps:
    1. Check via 'openstack metric list' or 'ceilometer event-list'
    2. Check that meters for router, subnet, image, and vswitch exist
    """
    # skip('CGTS-10102: Disable TC until US116020 completes')
    time_create = system_helper.get_host_values('controller-1',
                                                'created_at')[0]
    current_isotime = datetime.utcnow().isoformat(sep='T')

    if common.get_timedelta_for_isotimes(
            time_create, current_isotime) > timedelta(hours=24):
        skip("Over a day since install. Meters no longer exist.")

    # Check meter for routers
    LOG.tc_step(
        "Check number of 'router.create.end' events is at least the number of existing routers"
    )
    routers = network_helper.get_routers()
    router_id = routers[0]
    check_event_in_tenant_or_admin(resource_id=router_id,
                                   event_type='router.create.end')

    # Check meter for subnets
    LOG.tc_step(
        "Check number of 'subnet.create' meters is at least the number of existing subnets"
    )
    subnets = network_helper.get_subnets(
        name=Tenant.get_primary().get('tenant'), strict=False)
    subnet = random.choice(subnets)
    LOG.info("Subnet to check in ceilometer event list: {}".format(subnet))
    check_event_in_tenant_or_admin(resource_id=subnet,
                                   event_type='subnet.create.end')

    # Check meter for image
    LOG.tc_step('Check meters for image')
    images = glance_helper.get_images(field='id')
    resource_ids = gnocchi_helper.get_metrics(metric_name='image.size',
                                              field='resource_id')
    assert set(images) <= set(resource_ids)

    # Check meter for vswitch
    LOG.tc_step('Check meters for vswitch')
    resource_ids = gnocchi_helper.get_metrics(
        metric_name='vswitch.engine.util', fail_ok=True, field='resource_id')
    if system_helper.is_avs():
        hypervisors = host_helper.get_hypervisors()
        assert len(hypervisors) <= len(resource_ids), \
            "Each nova hypervisor should have at least one vSwitch core"
    else:
        assert not resource_ids, "vswitch meters found for STX build"
Example #5
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
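The _vms() factory above boots two virtio VMs plus two avp (AVS) or virtio (OVS) VMs. In practice it would typically be wrapped in a module-scoped fixture so the VMs are booted once per module; a minimal sketch of such a wrapper (the fixture name is an assumption):

from pytest import fixture


@fixture(scope='module')
def vms_for_module():
    # Boot the four VMs once and share them across the module's tests.
    return _vms()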
Example #6
def snat_setups(request):
    find_dvr = 'True' if request.param == 'distributed' else 'False'

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info in [primary_tenant, other_tenant]:
        tenant_router = network_helper.get_tenant_router(auth_info=auth_info)
        is_dvr_router = network_helper.get_router_values(router_id=tenant_router,
                                                         fields='distributed')[0]
        if find_dvr == str(is_dvr_router):
            LOG.fixture_step("Setting primary tenant to {}".format(common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            break
    else:
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    network_helper.set_router_gateway(enable_snat=True)     # Check snat is handled by the keyword

    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)
    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False, cleanup='module')[1]

    if system_helper.is_avs():
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        ping_res = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, fail_ok=True, use_fip=False)
        assert ping_res is False, "VM can still be ping'd from outside after SNAT enabled without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    LOG.fixture_step("Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)

    return vm_id, floatingip
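snat_setups reads request.param, so it is meant to be a parametrized fixture. A rough sketch of how it could be declared and consumed follows; the 'centralized' parameter value and the test body are assumptions, not taken from the source.

from pytest import fixture


@fixture(scope='module', params=['distributed', 'centralized'])
def snat_setups(request):
    ...   # body as shown in Example #6


def test_snat_vm_reachable_via_fip(snat_setups):
    vm_id, floating_ip = snat_setups
    # With SNAT enabled and a floating ip associated, the VM should be
    # reachable from the NatBox.
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, use_fip=True)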
Example #7
def test_horizon_dashboard_help_redirection(admin_home_pg_container):
    """Verifies Help link redirects to the right URL."""

    if not system_helper.is_avs():
        skip('No support page available for STX')

    admin_home_pg_container.go_to_help_page()
    admin_home_pg_container._wait_until(
        lambda _: admin_home_pg_container.is_nth_window_opened(2))

    admin_home_pg_container.switch_window()
    time.sleep(2)
    assert 'http://www.windriver.com/support/' == \
           admin_home_pg_container.get_current_page_url()

    admin_home_pg_container.close_window()
    admin_home_pg_container.switch_window()
    horizon.test_result = True
Example #8
def test_heat_template(template_name, revert_quota):
    """
    Basic Heat template testing with various Heat templates.

    Args:
        template_name (str): e.g., OS_Cinder_Volume.
        revert_quota (dict): test fixture to revert network quota.

    =====
    Prerequisites (skip test if not met):
        - at least two hypervisor hosts on the system

    Test Steps:
        - Create a heat stack with the given template
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - Delete Heat stack and verify resource deletion

    """
    if 'QoSPolicy' in template_name:
        if not system_helper.is_avs():
            skip("QoS policy is not supported by OVS")

    elif template_name == 'OS_Neutron_RouterInterface.yaml':
        LOG.tc_step("Increase network quota by 2 for every tenant")
        tenants_quotas = revert_quota
        for tenant_id, quotas in tenants_quotas.items():
            network_quota, subnet_quota = quotas
            vm_helper.set_quotas(tenant=tenant_id,
                                 networks=network_quota + 10,
                                 subnets=subnet_quota + 10)

    elif template_name == 'OS_Nova_Server.yaml':
        # create new image to do update later
        LOG.tc_step("Creating an Image to be used for heat update later")
        glance_helper.create_image(name='tis-centos2', cleanup='function')

    # add test step
    verify_basic_template(template_name)
Example #9
    def test_kpi_evacuate(self, vm_type, get_hosts, collect_kpi):
        if not collect_kpi:
            skip("KPI only test. Skip due to kpi collection is not enabled.")
        if not system_helper.is_avs() and vm_type in ('dpdk', 'avp'):
            skip('avp vif unsupported by OVS')

        def operation(vm_id_, host_):
            vm_helper.evacuate_vms(host=host_,
                                   vms_to_check=vm_id_,
                                   ping_vms=True)

        vm_test, vm_observer = vm_helper.launch_vm_pair(
            vm_type=vm_type, storage_backing='local_image')

        host_src_evacuation, host_observer = self._prepare_test(
            vm_test, vm_observer, get_hosts.copy(), with_router=True)
        time.sleep(60)
        with_router_kpi = vm_helper.get_traffic_loss_duration_on_operation(
            vm_test, vm_observer, operation, vm_test, host_src_evacuation)
        assert with_router_kpi > 0, "Traffic loss duration is not properly detected"
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                                  kpi_name=Evacuate.NAME.format(
                                      vm_type, 'with'),
                                  kpi_val=with_router_kpi / 1000,
                                  uptime=5)

        host_helper.wait_for_hosts_ready(hosts=host_src_evacuation)

        if len(get_hosts) > 2:
            host_src_evacuation, host_observer = self._prepare_test(
                vm_test, vm_observer, get_hosts.copy(), with_router=False)
            time.sleep(60)
            without_router_kpi = vm_helper.get_traffic_loss_duration_on_operation(
                vm_test, vm_observer, operation, vm_test, host_src_evacuation)
            assert without_router_kpi > 0, "Traffic loss duration is not properly detected"
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                                      kpi_name=Evacuate.NAME.format(
                                          vm_type, 'no'),
                                      kpi_val=without_router_kpi / 1000,
                                      uptime=5)
Example #10
def add_admin_role(request):

    if not system_helper.is_avs():
        skip("vshell commands unsupported by OVS")

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    tenants = [primary_tenant, other_tenant]
    res = []
    for auth_info in tenants:
        code = keystone_helper.add_or_remove_role(add_=True, role='admin', user=auth_info.get('user'),
                                                  project=auth_info.get('tenant'))[0]
        res.append(code)

    def remove_admin_role():
        for i in range(len(res)):
            if res[i] != -1:
                auth_info_ = tenants[i]
                keystone_helper.add_or_remove_role(add_=False, role='admin', user=auth_info_.get('user'),
                                                   project=auth_info_.get('tenant'))

    request.addfinalizer(remove_admin_role)
Example #11
def get_vif_type():
    return 'avp' if system_helper.is_avs() else None
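get_vif_type() feeds the optional 'vif-model' field of a nic dict: 'avp' on AVS, and None on OVS so the default virtio model is used. A short illustrative snippet (variable names are not from the source):

vif_model = get_vif_type()                # 'avp' on AVS, None on OVS
nic = {'net-id': network_helper.get_tenant_net_id()}
if vif_model:
    nic['vif-model'] = vif_model          # only set an explicit vif on AVS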
Example #12
def skip_4k_for_ovs(mempage_size):
    if mempage_size in (None, 'any', 'small') and not system_helper.is_avs():
        skip("4K VM is only supported by AVS")
Example #13
def _get_stress_ng_heat(con_ssh=None):
    """
    copy the cloud-config userdata to TiS server.
    This userdata adds sysadmin/li69nux user to guest

    Args:
        con_ssh (SSHClient):

    Returns (str): TiS filepath of the userdata

    """
    file_dir = StxPath.CUSTOM_HEAT_TEMPLATES
    file_name = HeatTemplate.STRESS_NG
    file_path = file_dir + file_name

    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    if not con_ssh.file_exists(file_path=file_path):
        LOG.debug('Create the template directory if it does not already exist')
        cmd = 'mkdir -p {}'.format(file_dir)
        con_ssh.exec_cmd(cmd, fail_ok=False)

        source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name

        dest_path = common.scp_from_test_server_to_active_controller(
            source_path=source_file,
            dest_dir=file_dir,
            dest_name=file_name,
            con_ssh=con_ssh)

        if dest_path is None:
            raise exceptions.CommonError(
                "Heat template file {} does not exist after download".format(
                    file_path))

    # Tenant net names are hardcoded in the heat file; they need to be updated when the system does not have them.
    # Update the heat file if 3 or fewer tenant-nets are configured.
    tenant_nets = network_helper.get_tenant_net_ids(field='name')
    net_count = len(tenant_nets)
    if net_count <= 3:
        LOG.info(
            "Less than 3 tenant networks configured. Update heat template.")
        con_ssh.exec_cmd("sed -i 's/tenant2-net3/tenant2-net{}/g' {}".format(
            net_count - 1, file_path))
        if net_count <= 2:
            con_ssh.exec_cmd(
                "sed -i 's/tenant2-net2/tenant2-net{}/g' {}".format(
                    net_count - 1, file_path))
            if net_count <= 1:
                con_ssh.exec_cmd(
                    "sed -i 's/tenant2-net1/tenant2-net{}/g' {}".format(
                        net_count - 1, file_path))

    # update heat file for multi-region system
    from consts.proj_vars import ProjVar
    from consts.stx import MULTI_REGION_MAP
    region = ProjVar.get_var("REGION")
    if region != 'RegionOne' and region in MULTI_REGION_MAP:
        region_str = MULTI_REGION_MAP.get(region)
        con_ssh.exec_cmd("sed -i 's/tenant2-net/tenant2{}-net/g' {}".format(
            region_str, file_path))
        con_ssh.exec_cmd(
            "sed -i 's/tenant2-mgmt-net/tenant2{}-mgmt-net/g' {}".format(
                region_str, file_path))

    if not system_helper.is_avs():
        con_ssh.exec_cmd("sed -i 's/avp/virtio/g' {}".format(file_path))

    return file_path
Example #14
def test_vm_numa_node_settings(vcpus, numa_nodes, numa_node0, numa_node1,
                               no_simplex, check_numa_num):
    """
    Test NUMA nodes settings in flavor extra specs are successfully applied to a vm

    Args:
        vcpus (int): Number of vcpus to set when creating flavor
        numa_nodes (int): Number of NUMA nodes to set in flavor extra specs
        numa_node0 (int): node.0 value in flavor extra specs
        numa_node1 (int): node.1 value in flavor extra specs

    Test Steps:
        - Create a flavor with given number of vcpus specified
        - Add numa_nodes related extra specs
        - Boot a vm with flavor
        - Run vm-topology
        - Verify vcpus, numa nodes, cpulist for specific vm reflects the settings in flavor
        - Ensure that all virtual NICs are associated with guest virtual numa node 0 (tests TC5069)

    Teardown:
        - Delete created vm, volume, and flavor

    """
    if check_numa_num < numa_nodes:
        skip("Number of processors - {} is less than required numa nodes - {}".
             format(check_numa_num, numa_nodes))

    LOG.tc_step("Create flavor with {} vcpus".format(vcpus))
    flavor = nova_helper.create_flavor('numa_vm', vcpus=vcpus)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')

    extra_specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.NUMA_0: numa_node0
    }
    if numa_node1 is not None:
        extra_specs[FlavorSpec.NUMA_1] = numa_node1

    LOG.tc_step("Set following extra specs for flavor {}: {}.".format(
        extra_specs, flavor))
    nova_helper.set_flavor(flavor, **extra_specs)

    LOG.tc_step("Boot vm with flavor {}.".format(flavor))
    vm_id = vm_helper.boot_vm(flavor=flavor, cleanup='function')[1]

    LOG.tc_step("Verify cpu info for vm {} via vm-topology.".format(vm_id))
    nova_tab, libvirt_tab = system_helper.get_vm_topology_tables(
        'servers', 'libvirt')

    # Filter out the line for vm under test
    nova_tab = table_parser.filter_table(nova_tab, ID=vm_id)
    libvirt_tab = table_parser.filter_table(libvirt_tab, uuid=vm_id)

    instance_topology = table_parser.get_column(nova_tab,
                                                'instance_topology')[0]
    cpulist = table_parser.get_column(libvirt_tab, 'cpulist')[0]
    if '-' in cpulist:
        cpulist = cpulist.split(sep='-')
        cpulist_len = int(cpulist[1]) - int(cpulist[0]) + 1
    else:
        cpulist_len = len(cpulist.split(sep=','))
    vcpus_libvirt = int(table_parser.get_column(libvirt_tab, 'vcpus')[0])
    nodelist = table_parser.get_column(libvirt_tab, 'nodelist')[0]

    if isinstance(instance_topology, str):
        instance_topology = [instance_topology]

    # Each numa node will have an entry for given instance, thus number of entries should be the same as number of
    # numa nodes for the vm
    assert numa_nodes == len(instance_topology), \
        "Number of numa node entries for vm {} is different than number of NUMA nodes set in flavor".format(vm_id)

    expected_node_vals = [
        int(val) for val in [numa_node0, numa_node1] if val is not None
    ]
    actual_node_vals = []
    for actual_node_info in instance_topology:
        actual_node_val = int(
            re.findall(InstanceTopology.NODE, actual_node_info)[0])
        actual_node_vals.append(actual_node_val)

    assert expected_node_vals == actual_node_vals, \
        "Individual NUMA node value(s) for vm {} is different than numa_node setting in flavor".format(vm_id)

    assert vcpus == vcpus_libvirt, \
        "Number of vcpus for vm {} in libvirt view is different than what's set in flavor.".format(vm_id)

    assert vcpus == cpulist_len, \
        "Number of entries in cpulist for vm {} in libvirt view is different than number of vcpus set in flavor".format(
                vm_id)

    if '-' in nodelist:
        nodelist = nodelist.split(sep='-')
        nodelist_len = int(nodelist[1]) - int(nodelist[0]) + 1
    else:
        nodelist_len = 1 if nodelist else 0

    assert numa_nodes == nodelist_len, \
        "nodelist for vm {} in libvirt view does not match number of numa nodes set in flavor".format(vm_id)

    if system_helper.is_avs():
        # TC5069
        LOG.tc_step(
            "Check via vshell that all vNICs are associated with the host NUMA node that guest numa0 maps to"
        )
        host = vm_helper.get_vm_host(vm_id)
        actual_ports = network_helper.get_ports(vm_id)
        with host_helper.ssh_to_host(host) as compute_ssh:
            for port_id in actual_ports:
                ports_tab = table_parser.table(
                    compute_ssh.exec_cmd("vshell port-show {}".format(port_id),
                                         fail_ok=False)[1])
                socket_id = int(
                    table_parser.get_value_two_col_table(ports_tab,
                                                         field='socket-id'))
                assert socket_id == numa_node0, "NIC is not associated with numa-node0"
Example #15
def avs_required():
    if not system_helper.is_avs():
        skip('Test unsupported by OVS')
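avs_required is the most compact form of the AVS gate used throughout these examples. Two hypothetical ways to apply it, assuming it is registered as a pytest fixture in a conftest.py (neither is shown in the source):

from pytest import fixture, mark


@fixture(autouse=True)
def _require_avs(avs_required):
    """Skip every test in this module when the vswitch is OVS."""


@mark.usefixtures('avs_required')
def test_avs_only_feature():
    ...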
Example #16
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test VMs' east-west connectivity by pinging the VMs' data network from a VM

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps:
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its original state

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
Example #17
def skip_4k_for_ovs(mempage_size):
    if mempage_size in (None, 'any', 'small') and not system_helper.is_avs():
        skip("4K VM is unsupported by OVS by default")
Example #18
def pb_launch_vms(con_ssh, image_ids, backup_info=None):
    """
    Launch VMs before doing System Backup

    Args:
        con_ssh:
            - current ssh connection

        image_ids:
            - IDs of images, for which boot-from-image VMs will be launched

        backup_info:
            - options for doing System Backup

    Return:
        VMs created
    """

    vms_added = []

    if not image_ids:
        LOG.warn('No images to backup, backup_info:{}'.format(backup_info))
    else:
        LOG.info('-currently active images: {}'.format(image_ids))
        properties = ['name', 'status', 'visibility']
        for image_id in image_ids:
            name, status, visibility = glance_helper.get_image_properties(
                image_id, properties)
            if status == 'active' and name and 'centos-guest' in name:
                vm_type = 'virtio'
                LOG.info(
                    'launch VM of type:{} from image:{}, image-id:{}'.format(
                        vm_type, name, image_id))
                vms_added += vm_helper.launch_vms(
                    vm_type,
                    image=image_id,
                    boot_source='image',
                    auth_info=Tenant.get('tenant1'),
                    con_ssh=con_ssh)[0]
                LOG.info('-OK, 1 VM from image boot up {}'.format(
                    vms_added[-1]))
                break
            else:
                LOG.info('skip booting VMs from image:{}, id:{}'.format(
                    name, image_id))

    vm_types = ['virtio']
    if system_helper.is_avs(con_ssh=con_ssh):
        vm_types += ['vswitch', 'dpdk', 'vhost']

    LOG.info('-launch VMs for different types:{}'.format(vm_types))

    LOG.info('-first make sure we have enough quota')
    vm_count = len(vms_added) + len(vm_types)
    adjust_vm_quota(vm_count, con_ssh, backup_info=backup_info)

    for vm_type in vm_types:
        vms_added += vm_helper.launch_vms(vm_type,
                                          auth_info=Tenant.get('tenant1'),
                                          con_ssh=con_ssh)[0]

    vms_added.append(
        vm_helper.boot_vm(auth_info=Tenant.get('tenant1'), con_ssh=con_ssh)[1])

    return vms_added
Example #19
def test_port_trunking():
    """
    Port trunking feature test cases

    Test Steps:
        - Create networks
        - Create subnets
        - Create a parent port and subports
        - Create a trunk with parent port and subports
        - Boot the first vm with the trunk
        - Create the second trunk without subport
        - Boot the second vm
        - Add subport to trunk
        - Configure vlan interfaces inside guests
        - Verify connectivity via vlan interfaces
        - Remove the subport from trunk and verify connectivity
        - Add the subport back to trunk and verify connectivity
        - Do vm actions and verify connectivity


    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    vif_model = 'avp' if system_helper.is_avs() else None
    network_names = ['network11', 'network12', 'network13']
    net_ids = []
    sub_nets = ["30.0.0.0/24", "30.0.1.0/24", "30.0.2.0/24"]
    subnet_ids = []
    # parent ports and sub ports for trunk 1 and trunk 2
    trunk1_parent_port = 'vrf10'
    trunk1_subport_1 = 'vrf11'
    trunk1_subport_2 = 'vrf12'

    trunk2_parent_port = 'host10'
    trunk2_subport_1 = 'host11'
    trunk2_subport_2 = 'host12'

    # vlan id for the subports
    segment_1 = 1
    segment_2 = 2

    LOG.tc_step("Create Networks to be used by trunk")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    # Create Trunks
    LOG.tc_step("Create Parent port for trunk 1")
    t1_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk1_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t1_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk1_parent_port)[0]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk1_subport_1,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk1_subport_2,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t1_sub_ports = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t1_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 1")
    trunk1_id = network_helper.create_trunk(t1_parent_port_id,
                                            name='trunk-1',
                                            sub_ports=t1_sub_ports,
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}, {'port-id': t1_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm_id = vm_helper.boot_vm(name='vm-with-trunk1-port',
                              nics=nics,
                              cleanup='function')[1]
    LOG.tc_step("Setup vlan interfaces inside guest")
    _bring_up_vlan_interface(vm_id, 'eth1', [segment_1])

    # Create second trunk port without the subports and vm
    LOG.tc_step("Create Parent port for trunk 2")
    t2_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk2_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t2_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk2_parent_port)[0]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk2_subport_1,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk2_subport_2,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t2_sub_ports = [{
        'port': t2_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t2_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 2")
    trunk2_id = network_helper.create_trunk(t2_parent_port_id,
                                            name='trunk-2',
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics_2 = [{'net-id': mgmt_net_id}, {'port-id': t2_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm2_id = vm_helper.boot_vm(name='vm-with-trunk2-port',
                               nics=nics_2,
                               cleanup='function')[1]

    LOG.tc_step("Add the sub ports to the second truck")
    network_helper.set_trunk(trunk2_id, sub_ports=t2_sub_ports)

    LOG.tc_step("Setup VLAN interfaces inside guest")
    _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

    # ping b/w 2 vms using the vlan interfaces
    eth_name = 'eth1.1'

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = network_helper.get_ip_for_eth(eth_name=eth_name,
                                                ssh_client=vm_ssh)

    if ip_addr:
        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step("Ping on vlan interface from guest")
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

    # unset the subport on trunk_1 and try the ping (it will fail)
    LOG.tc_step(
        "Removing a subport from trunk and ping on vlan interface inside guest"
    )
    ret_code_10 = network_helper.unset_trunk(trunk1_id,
                                             sub_ports=[t1_sub_port1_id])[0]
    assert ret_code_10 == 0, "Subports not removed as expected."

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        ping = network_helper.ping_server(ip_addr,
                                          ssh_client=vm2_ssh,
                                          num_pings=20,
                                          fail_ok=True)[0]
        assert ping == 100, "Ping did not fail as expected."

    # set the subport on trunk_1 and try the ping (it will work)
    LOG.tc_step(
        " Add back the subport to trunk and ping on vlan interface inside guest"
    )
    t1_sub_port = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }]
    network_helper.set_trunk(trunk1_id, sub_ports=t1_sub_port)

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    # VM operation and ping
    for vm_actions in [['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm2_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm2_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management networks still works "
            "after {}".format(vm_actions))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm2_id,
                                   net_types=['mgmt'])

        if vm_actions[0] == 'cold_migrate':
            LOG.tc_step("Setup VLAN interfaces inside guest")
            _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step(
                "Ping on vlan interface from guest after action {}".format(
                    vm_actions))
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

        vm_host = vm_helper.get_vm_host(vm2_id)

        vm_on_target_host = vm_helper.get_vms_on_host(vm_host)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm2_id, ping_vms=True)

    for vm_id_on_target_host in vm_on_target_host:
        LOG.tc_step("Setup VLAN interfaces inside guest")
        _bring_up_vlan_interface(vm_id_on_target_host, 'eth1', [segment_1])

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest after evacuation")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    LOG.tc_step(
        "Attempt to delete trunk when in use, expect pass for AVS only")
    code = network_helper.delete_trunks(trunks=trunk1_id, fail_ok=True)[0]

    if system_helper.is_avs():
        assert 0 == code, "Failed to delete port trunk when it's used by a running VM with AVS"
    else:
        assert 1 == code, "Trunk is deleted when it's used by a running VM with OVS"
Example #20
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using wrl5_avp and wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM use WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check that VM nic vif models are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # Attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports
Example #21
def check_avs_pattern():
    if not system_helper.is_avs():
        skip('avp vif required by dpdk/vhost vm is unsupported by OVS')
Example #22
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test that the vif model set in image metadata is reflected in vm nics when using normal vnic type.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
Example #23
def test_vm_with_max_vnics_attached_during_boot(base_vm, guest_os, nic_arg,
                                                boot_source):
    """
    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with 1 mgmt and 15 avp/virtio Interfaces
        - Perform nova action (live migrate --force, live migrate, rebuild, reboot hard/soft, resize revert, resize)
        - ping between base_vm and vm_under_test over mgmt & tenant network

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    vif_type = 'avp' if system_helper.is_avs() else None

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'function'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    # TODO Update vif model config. Right now vif model avp still under implementation
    nics = [mgmt_nic]
    for i in range(15):
        if nic_arg == 'port_id':
            port_id = network_helper.create_port(tenant_net_id,
                                                 'tenant_port-{}'.format(i),
                                                 wrs_vif=vif_type,
                                                 cleanup='function')[1]
            nics.append({'port-id': port_id})
        else:
            nics.append({'net-id': tenant_net_id, 'vif-model': vif_type})

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with 1 mgmt and 15 data interfaces".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('max_vifs-{}-{}'.format(
        guest_os, boot_source),
                                      nics=nics,
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    expt_vnics = 16
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "vnics attached is not equal to max number."

    _ping_vm_data(vm_under_test, vm_under_test, action='boot')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='configure routes')

    destination_host = vm_helper.get_dest_host_for_live_migrate(
        vm_id=vm_under_test)
    if destination_host:
        # LOG.tc_step("Perform following action(s) on vm {}: {}".format(vm_under_test, 'live-migrate --force'))
        # vm_helper.live_migrate_vm(vm_id=vm_under_test, destination_host=destination_host, force=True)
        # _ping_vm_data(vm_under_test, base_vm_id, action='live migrate --force')

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_under_test, 'live-migrate'))
        vm_helper.live_migrate_vm(vm_id=vm_under_test)
        _ping_vm_data(vm_under_test, base_vm_id, action='live-migrate')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'hard reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test, hard=True)
    _ping_vm_data(vm_under_test, base_vm_id, action='hard reboot')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'soft reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='soft reboot')

    LOG.tc_step('Create destination flavor')
    dest_flavor_id = nova_helper.create_flavor(name='dest_flavor',
                                               vcpus=2,
                                               guest_os=guest_os)[1]

    LOG.tc_step('Resize vm to dest flavor and revert')
    vm_helper.resize_vm(vm_under_test,
                        dest_flavor_id,
                        revert=True,
                        fail_ok=False)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize revert')

    LOG.tc_step('Resize vm to dest flavor and revert False')
    vm_helper.resize_vm(vm_under_test, dest_flavor_id, fail_ok=False)
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'rebuild'))
    vm_helper.rebuild_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, vm_under_test, action='rebuild')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='rebuild')
Example #24
def test_backup(pre_system_backup):
    """
    Test creating a backup of the system and its available and in-use volumes.
    Copy backup files to a USB flash drive.

    Args:


    Setup:
        - create system backup using config_controller (creates system and image tgz)
        - back up images separately if it is a storage lab that uses CEPH
        - back up all available and in-use volumes from the lab

    Test Steps:
        - check system and img tgz are created for system backup
        - check all images are backed up in storage
        - check all volumes tgz are created for backup

    Teardown:
        - Delete vm if booted
        - Delete created flavor (module)

    """

    backup_info = pre_system_backup
    LOG.info('Before backup, perform configuration changes and launch VMs')

    con_ssh = ControllerClient.get_active_controller()
    backup_info['con_ssh'] = con_ssh

    is_ceph = backup_info.get('is_storage_lab', False)
    LOG.debug('This is a {} lab'.format(
        'Storage/Ceph' if is_ceph else 'Non-Storage/Ceph'))

    if is_ceph:
        con_ssh.exec_sudo_cmd('touch /etc/ceph/ceph.client.None.keyring')
        pre_backup_test(backup_info, con_ssh)

    lab = InstallVars.get_install_var('LAB')
    LOG.tc_step(
        "System backup: lab={}; backup dest = {} backup destination path = {} ..."
        .format(lab['name'], backup_info['backup_dest'],
                backup_info['backup_dest_full_path']))
    copy_to_usb = None
    usb_part2 = None

    backup_dest = backup_info['backup_dest']
    if backup_dest == 'usb':
        usb_partition_info = backup_info['usb_parts_info']
        for k, v in usb_partition_info.items():
            if k[-1:] == "1":
                pass
                # usb_part1 = k
            elif k[-1:] == '2':
                usb_part2 = k
        copy_to_usb = usb_part2

    backup_info['copy_to_usb'] = copy_to_usb
    backup_info['backup_file_prefix'] = get_backup_file_name_prefix(
        backup_info)
    backup_info['cinder_backup'] = BackupVars.get_backup_var('cinder_backup')
    reinstall_storage = BackupVars.get_backup_var('reinstall_storage')

    if reinstall_storage:
        if is_ceph:
            backup_cinder_volumes(backup_info)

        backup_sysconfig_images(backup_info)
    else:
        # if is_ceph:
        #     backup_cinder_volumes(backup_info)

        backup_sysconfig_images(backup_info)

    collect_logs('after_backup')

    if system_helper.is_avs(con_ssh=con_ssh):
        # Copying system backup ISO file for future restore
        assert backup_load_iso_image(backup_info)
Example #25
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to vm with the given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - detach all the tenant interfaces
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs() else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "vnics attached is not equal to max number."

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, "vnic attach did not fail after reaching the upper limit of 16"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
Example #26
def check_avs_pattern(stx_openstack_required):
    if not system_helper.is_avs():
        skip("4k vm unsupported by OVS-dpdk")