Example #1
def pci_prep():
    """Raise the core quota for both tenants so subsequent PCI tests are not quota-limited."""
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    primary_tenant_name = primary_tenant['tenant']
    vm_helper.set_quotas(tenant=primary_tenant_name, cores=100)
    vm_helper.set_quotas(tenant=other_tenant['tenant'], cores=100)
    return primary_tenant, primary_tenant_name, other_tenant
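
A minimal consumer sketch, assuming pci_prep is registered as a pytest fixture in its original module; the test name and assertion below are illustrative only.

# Hypothetical consumer -- assumes pci_prep is decorated as a pytest fixture.
def test_pci_quotas_prepared(pci_prep):
    primary_tenant, primary_tenant_name, other_tenant = pci_prep
    # The fixture has already raised the core quota to 100 for both tenants.
    assert primary_tenant_name == primary_tenant['tenant']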
Example #2
def snat_setups(request):
    """Select the tenant whose router matches the requested type, enable SNAT, and boot a test vm."""
    # The router's 'distributed' field is compared in string form below
    find_dvr = 'True' if request.param == 'distributed' else 'False'

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info in [primary_tenant, other_tenant]:
        tenant_router = network_helper.get_tenant_router(auth_info=auth_info)
        is_dvr_router = network_helper.get_router_values(router_id=tenant_router,
                                                         fields='distributed')[0]
        if find_dvr == str(is_dvr_router):
            LOG.fixture_step("Setting primary tenant to {}".format(common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            break
    else:
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    network_helper.set_router_gateway(enable_snat=True)     # SNAT verification is handled inside the keyword

    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)
    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False, cleanup='module')[1]

    if system_helper.is_avs():
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        ping_res = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, fail_ok=True, use_fip=False)
        assert ping_res is False, "VM can still be ping'd from outside after SNAT enabled without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    LOG.fixture_step("Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)

    return vm_id, floatingip
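
A minimal consumer sketch, assuming snat_setups is a parametrized pytest fixture (request.param being 'distributed' or 'centralized', per the check above); the test name is illustrative.

# Hypothetical consumer -- the fixture returns the vm and its floating ip; its
# finalizer disables SNAT and restores the primary tenant afterwards.
def test_snat_vm_stays_reachable(snat_setups):
    vm_id, floating_ip = snat_setups
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)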
Example #3
def add_admin_role(request):
    """Grant the admin role to both tenant users (needed for vshell); revoke it on teardown."""
    if not system_helper.is_avs():
        skip("vshell commands unsupported by OVS")

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    tenants = [primary_tenant, other_tenant]
    res = []
    for auth_info in tenants:
        code = keystone_helper.add_or_remove_role(add_=True, role='admin', user=auth_info.get('user'),
                                                  project=auth_info.get('tenant'))[0]
        res.append(code)

    def remove_admin_role():
        # Only revoke grants that actually succeeded (return code != -1)
        for code, auth_info_ in zip(res, tenants):
            if code != -1:
                keystone_helper.add_or_remove_role(add_=False, role='admin', user=auth_info_.get('user'),
                                                   project=auth_info_.get('tenant'))

    request.addfinalizer(remove_admin_role)
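
A minimal consumer sketch, assuming add_admin_role is applied as a pytest fixture; the body is a placeholder showing only that the grant is active for the test's duration.

# Hypothetical consumer -- the admin role is held by both tenants while this
# test runs; the finalizer revokes every grant that succeeded (code != -1).
def test_requires_admin_role(add_admin_role):
    # vshell-backed checks that need the admin role would go here.
    pass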
Example #4
def pre_alarms_session():
    """Ensure default security groups allow ping and ssh, then collect the session's baseline alarms."""
    if container_helper.is_stx_openstack_deployed():
        from keywords import network_helper
        for auth_info in (Tenant.get_primary(), Tenant.get_secondary()):
            project = auth_info['tenant']
            default_group = network_helper.get_security_groups(
                auth_info=auth_info, name='default', strict=True)
            if not default_group:
                LOG.info(
                    "No default security group for {}. Skip security group "
                    "rule config.".format(project))
                continue

            default_group = default_group[0]
            security_rules = network_helper.get_security_group_rules(
                auth_info=auth_info,
                **{
                    'IP Protocol': ('tcp', 'icmp'),
                    'Security Group': default_group
                })
            if len(security_rules) >= 2:
                LOG.info(
                    "Default security group rules for {} already configured "
                    "to allow ping and ssh".format(project))
                continue

            LOG.info(
                "Create icmp and ssh security group rules for {} with best "
                "effort".format(project))
            for protocol, dst_port in (('icmp', None), ('tcp', 22)):
                network_helper.create_security_group_rule(group=default_group,
                                                          protocol=protocol,
                                                          dst_port=dst_port,
                                                          fail_ok=True,
                                                          auth_info=auth_info)

    return __get_alarms('session')
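
A hedged sketch of how the returned baseline is typically consumed, assuming a later check can call the same module-private alarm collector to diff against it; the helper name is illustrative.

# Illustrative only -- assumes __get_alarms (or an equivalent) is callable when
# the post-check runs, so new alarms can be diffed against the baseline.
def check_no_new_alarms(pre_alarms):
    post_alarms = __get_alarms('session')
    new_alarms = [alarm for alarm in post_alarms if alarm not in pre_alarms]
    assert not new_alarms, "New alarms raised during session: {}".format(new_alarms)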
Example #5
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure the vxlan provider net is configured only on the internal net
            - Find an internal network that matches the vxlan mode and IP version
            - Use the mgmt net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms land on separate hosts, enforced via host aggregates
            - ssh to the compute hosting the vm to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to an unknown IP and check the compute stats

        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured, configured on more than one provider net, "
             "or not configured on the internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate is not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # Double-check that the vms are not on the same compute; cold migrate one if they are
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert stats > 0, "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert stats_known_vtep > 0, "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert stats_unknown_vtep > 0, "stats are not incremented as expected"
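
The three stats checks above repeat the same two-or-three-column parse; a small helper along these lines (name illustrative) would consolidate them.

def _sum_endpoint_stats(stats_row):
    # stats_row is the raw row from get_vxlan_endpoint_stats():
    # [label, count] or [label, count_a, count_b]; returns the summed count.
    if len(stats_row) == 3:
        return int(stats_row[1]) + int(stats_row[2])
    if len(stats_row) == 2:
        return int(stats_row[1])
    assert 0, "Failed to get stats from compute"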
Example #6
def test_kpi_vm_launch_migrate_rebuild(ixia_required, collect_kpi, hosts_per_backing, boot_from):
    """
    KPI test - vm startup time (plus migrate and rebuild KPIs when supported).
    Args:
        collect_kpi:
        hosts_per_backing:
        boot_from:

    Test Steps:
        - Create a flavor with 2 vcpus, dedicated cpu policy and storage backing (if boot-from-image)
        - Launch a vm from specified boot source
        - Collect the vm startup time via event log

    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled.")

    # vm launch KPI
    if boot_from != 'volume':
        storage_backing = boot_from
        hosts = hosts_per_backing.get(boot_from)
        if not hosts:
            skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(boot_from))

        target_host = hosts[0]
        LOG.tc_step("Clear local storage cache on {}".format(target_host))
        storage_helper.clear_local_storage_cache(host=target_host)

        LOG.tc_step("Create a flavor with 2 vcpus, dedicated cpu policy, and {} storage".format(storage_backing))
        boot_source = 'image'
        flavor = nova_helper.create_flavor(name=boot_from, vcpus=2, storage_backing=storage_backing)[1]
    else:
        target_host = None
        boot_source = 'volume'
        storage_backing = keywords.host_helper.get_storage_backing_with_max_hosts()[0]
        LOG.tc_step("Create a flavor with 2 vcpus, and dedicated cpu policy and {} storage".format(storage_backing))
        flavor = nova_helper.create_flavor(vcpus=2, storage_backing=storage_backing)[1]

    ResourceCleanup.add('flavor', flavor)
    nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    host_str = ' on {}'.format(target_host) if target_host else ''
    LOG.tc_step("Boot a vm from {}{} and collect vm startup time".format(boot_from, host_str))

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id},
            {'net-id': internal_net_id}]

    vm_id = vm_helper.boot_vm(boot_from, flavor=flavor, source=boot_source, nics=nics, cleanup='function')[1]

    code_boot, out_boot = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VmStartup.NAME.format(boot_from),
                                  log_path=VmStartup.LOG_PATH, end_pattern=VmStartup.END.format(vm_id),
                                  start_pattern=VmStartup.START.format(vm_id), uptime=1)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    # Migration KPI
    if ('ixia_ports' in ProjVar.get_var("LAB")) and (len(hosts_per_backing.get(storage_backing)) >= 2):

        LOG.info("Run migrate tests when more than 2 {} hosts available".format(storage_backing))
        LOG.tc_step("Launch an observer vm")

        mgmt_net_observer = network_helper.get_mgmt_net_id(auth_info=Tenant.get_secondary())
        tenant_net_observer = network_helper.get_tenant_net_id(auth_info=Tenant.get_secondary())
        nics_observer = [{'net-id': mgmt_net_observer},
                         {'net-id': tenant_net_observer},
                         {'net-id': internal_net_id}]
        vm_observer = vm_helper.boot_vm('observer', flavor=flavor, source=boot_source,
                                        nics=nics_observer, cleanup='function', auth_info=Tenant.get_secondary())[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm_observer)
        vm_helper.setup_kernel_routing(vm_observer)
        vm_helper.setup_kernel_routing(vm_id)
        vm_helper.route_vm_pair(vm_observer, vm_id)

        if boot_from != 'local_lvm':
            # live migration unsupported for boot-from-image vm with local_lvm storage
            LOG.tc_step("Collect live migrate KPI for vm booted from {}".format(boot_from))

            def operation_live(vm_id_):
                code, msg = vm_helper.live_migrate_vm(vm_id=vm_id_)
                assert 0 == code, msg
                vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
                # kernel routing
                vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

            time.sleep(30)
            duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_live, vm_id)
            assert duration > 0, "No traffic loss detected during live migration for {} vm".format(boot_from)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NAME.format(boot_from),
                                      kpi_val=duration, uptime=1, unit='Time(ms)')

            vim_duration = vm_helper.get_live_migrate_duration(vm_id=vm_id)
            kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=LiveMigrate.NOVA_NAME.format(boot_from),
                                      kpi_val=vim_duration, uptime=1, unit='Time(s)')

        LOG.tc_step("Collect cold migrate KPI for vm booted from {}".format(boot_from))

        def operation_cold(vm_id_):
            code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)

        time.sleep(30)
        duration = vm_helper.get_traffic_loss_duration_on_operation(vm_id, vm_observer, operation_cold, vm_id)
        assert duration > 0, "No traffic loss detected during cold migration for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

        vim_duration = vm_helper.get_cold_migrate_duration(vm_id=vm_id)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ColdMigrate.NOVA_NAME.format(boot_from),
                                  kpi_val=vim_duration, uptime=1, unit='Time(s)')

    # Rebuild KPI
    if boot_from != 'volume':
        LOG.info("Run rebuild test for vm booted from image")

        def operation_rebuild(vm_id_):
            code, msg = vm_helper.rebuild_vm(vm_id=vm_id_)
            assert 0 == code, msg
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
            vm_helper.ping_vms_from_vm(vm_id, vm_id, net_types=('data', 'internal'))

        LOG.tc_step("Collect vm rebuild KPI for vm booted from {}".format(boot_from))
        time.sleep(30)
        duration = vm_helper.get_ping_loss_duration_on_operation(vm_id, 300, 0.5, operation_rebuild, vm_id)
        assert duration > 0, "No ping loss detected during rebuild for {} vm".format(boot_from)
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=Rebuild.NAME.format(boot_from),
                                  kpi_val=duration, uptime=1, unit='Time(ms)')

    # Check the vm boot result at the end after collecting other KPIs
    assert code_boot == 0, out_boot
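
Each migrate KPI above follows the same shape: sleep, measure traffic loss around an operation callback, assert, record. A hedged generic version of that pattern (function name illustrative):

# Illustrative consolidation of the repeated measure-and-record pattern above.
def collect_loss_kpi(kpi_file, kpi_name, vm_under_test, vm_observer, operation):
    time.sleep(30)  # let traffic stabilize before measuring
    duration = vm_helper.get_traffic_loss_duration_on_operation(
        vm_under_test, vm_observer, operation, vm_under_test)
    assert duration > 0, "No traffic loss detected during {}".format(kpi_name)
    kpi_log_parser.record_kpi(local_kpi_file=kpi_file, kpi_name=kpi_name,
                              kpi_val=duration, uptime=1, unit='Time(ms)')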
Example #7
    def _prepare_test(vm1, vm2, get_hosts, with_router):
        """
        VMs:
            VM1: under test (primary tenant)
            VM2: traffic observer
        """

        vm1_host = vm_helper.get_vm_host(vm1)
        vm2_host = vm_helper.get_vm_host(vm2)
        vm1_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_primary())
        vm2_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_secondary())
        vm1_router_host = network_helper.get_router_host(router=vm1_router)
        vm2_router_host = network_helper.get_router_host(router=vm2_router)
        targets = list(get_hosts)

        if vm1_router_host == vm2_router_host:
            end_time = time.time() + 360
            while time.time() < end_time:
                vm1_router_host = network_helper.get_router_host(
                    router=vm1_router)
                vm2_router_host = network_helper.get_router_host(
                    router=vm2_router)
                if vm1_router_host != vm2_router_host:
                    break
                time.sleep(5)   # avoid busy-polling while waiting for a reschedule
            else:
                assert vm1_router_host != vm2_router_host, "two routers are located on the same compute host"

        if not with_router:
            """
            Setup:
                VM1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER1 on COMPUTE-B
                ROUTER2 on COMPUTE-C
            """
            if len(get_hosts) < 3:
                skip(
                    "Lab not suitable for without_router, requires at least three hypervisors"
                )

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            if vm2_host != vm2_router_host:
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=vm2_router_host)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == vm2_router_host, "live-migration failed"
            host_observer = vm2_host

            LOG.tc_step(
                "Ensure VM1 and (ROUTER1, VM2, ROUTER2) are on different hosts"
            )
            if vm1_router_host in targets:
                # ensure vm1_router_host is not selected for vm1
                # vm1_router_host can be backed by any type of storage
                targets.remove(vm1_router_host)
            if vm2_host in targets:
                targets.remove(vm2_host)

            if vm1_host in targets:
                host_src_evacuation = vm1_host
            else:
                assert targets, "no suitable compute for vm1, after excluding ROUTER1, VM2, ROUTER2 's hosts"
                host_src_evacuation = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=host_src_evacuation)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == host_src_evacuation, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_router_host != vm1_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"
        else:
            """
            Setup:
                VM1, ROUTER1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER2 on COMPUTE-B
            """
            LOG.tc_step("Ensure VM1, ROUTER1 on COMPUTE-A")

            # VM1 must be sitting on ROUTER1's host, thus vm1_router_host must be backed by local_image
            assert vm1_router_host in targets, "vm1_router_host is not backed by local_image"

            if vm1_host != vm1_router_host:
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=vm1_router_host)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == vm1_router_host, "live-migration failed"
            host_src_evacuation = vm1_host

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            targets.remove(host_src_evacuation)
            if vm2_host in targets:
                host_observer = vm2_host
            else:
                assert targets, "no suitable compute for vm2, after excluding COMPUTE-A"
                host_observer = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=host_observer)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == host_observer, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_host == vm1_router_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"

        assert vm1_host == host_src_evacuation and vm2_host == host_observer, "setup is incorrect"
        LOG.info("Evacuate: VM {} on {}, ROUTER on {}".format(
            vm1, vm1_host, vm1_router_host))
        LOG.info("Observer: VM {} on {}, ROUTER on {}".format(
            vm2, vm2_host, vm2_router_host))

        return host_src_evacuation, host_observer
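
The 360-second router-host wait near the top of _prepare_test could be factored into a reusable poll helper; a minimal sketch (hypothetical name), assuming the same network_helper API:

def wait_for_routers_on_separate_hosts(router1, router2, timeout=360, interval=5):
    # Poll until the two routers are scheduled on different compute hosts.
    end_time = time.time() + timeout
    while time.time() < end_time:
        host1 = network_helper.get_router_host(router=router1)
        host2 = network_helper.get_router_host(router=router2)
        if host1 != host2:
            return host1, host2
        time.sleep(interval)
    assert 0, "two routers are located on the same compute host"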