Example #1
    def remove_shared_cpu(self, request, config_host_class):
        storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
        assert hosts, "No hypervisor in storage aggregate"

        avail_zone = None
        hosts_unconfigured = []
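        # Hosts that still have shared cores configured on either processor need to be
        # reconfigured (shared cores removed) for this test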
        for host in hosts:
            shared_cores_host = host_helper.get_host_cpu_cores_for_function(hostname=host, func='shared', thread=0)
            if shared_cores_host[0] or shared_cores_host.get(1, None):
                hosts_unconfigured.append(host)

        if not hosts_unconfigured:
            return storage_backing, avail_zone

        hosts_configured = list(set(hosts) - set(hosts_unconfigured))
        hosts_to_configure = []
        if len(hosts_configured) < 2:
            hosts_to_configure = hosts_unconfigured[:(2-len(hosts_configured))]

        for host_to_config in hosts_to_configure:
            shared_cores = host_helper.get_host_cpu_cores_for_function(host_to_config, 'shared', thread=0)
            p1_config = p1_revert = None
            if 1 in shared_cores:
                p1_config = 0
                p1_revert = len(shared_cores[1])

            def _modify(host_):
                host_helper.modify_host_cpu(host_, 'shared', p0=0, p1=p1_config)

            def _revert(host_):
                host_helper.modify_host_cpu(host_, 'shared', p0=len(shared_cores[0]), p1=p1_revert)

            config_host_class(host=host_to_config, modify_func=_modify, revert_func=_revert)
            host_helper.wait_for_hypervisors_up(host_to_config)
            hosts_configured.append(host_to_config)
            hosts_unconfigured.remove(host_to_config)

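        # If any host could not be reconfigured, confine scheduling to the configured
        # hosts by grouping them into the cgcsauto aggregate/availability zone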
        if hosts_unconfigured:
            avail_zone = 'cgcsauto'
            LOG.fixture_step("({}) Add admin role to user under primary tenant and add configured hosts {} to "
                             "cgcsauto aggregate".format('class', hosts_configured))
            nova_helper.create_aggregate(name='cgcsauto', avail_zone='cgcsauto', check_first=True)
            code = keystone_helper.add_or_remove_role(add_=True, role='admin')[0]

            def remove_admin():
                nova_helper.delete_aggregates(avail_zone, remove_hosts=True)
                if code != -1:
                    LOG.fixture_step("({}) Remove admin role and cgcsauto aggregate".format('class'))
                    keystone_helper.add_or_remove_role(add_=False, role='admin')
            request.addfinalizer(remove_admin)

            nova_helper.add_hosts_to_aggregate(aggregate=avail_zone, hosts=hosts_configured)

        return storage_backing, avail_zone
Example #2
def add_host_to_zone(request, add_cgcsauto_zone, add_admin_role_module):
    nova_zone_hosts = host_helper.get_up_hypervisors()
    host_to_add = nova_zone_hosts[0]
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=host_to_add)

    def remove_host_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                check_first=False)

    request.addfinalizer(remove_host_from_zone)

    return host_to_add
Example #3
def add_host_to_zone(request, get_hosts_with_backing, add_cgcsauto_zone):
    storage_backing, host_under_test = get_hosts_with_backing
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                       hosts=host_under_test)

    def remove_host_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                check_first=False)

    request.addfinalizer(remove_host_from_zone)

    return storage_backing, host_under_test
Example #4
def add_hosts_to_zone(request, add_admin_role_class, add_cgcsauto_zone, reserve_unreserve_all_hosts_module):
    storage_backing, target_hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    if len(target_hosts) < 2:
        skip("Less than two up hosts have same storage backing")

    LOG.fixture_step("Update instance and volume quota to at least 10 and 20 respectively")
    vm_helper.ensure_vms_quotas()

    hosts_to_add = target_hosts[:2]
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=hosts_to_add)

    def remove_hosts_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto', check_first=False)
    request.addfinalizer(remove_hosts_from_zone)

    return storage_backing, hosts_to_add
Example #5
    def sriov_prep(self, request, pci_prep, add_cgcsauto_zone):
        primary_tenant, primary_tenant_name, other_tenant = pci_prep
        vif_model = 'pci-sriov'

        net_type, pci_net, pci_net_id, pnet_id, pnet_name = get_pci_net(
            request, vif_model, primary_tenant, primary_tenant_name,
            other_tenant)

        LOG.fixture_step(
            "Calculate number of vms and number of vcpus for each vm")
        pci_hosts = get_pci_hosts(vif_model, pnet_name)

        # TODO: nova provider-show deprecated. Update required.
        # vfs_conf, vfs_use_init = nova_helper.get_pci_interface_stats_for_providernet(
        #         pnet_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

        # # TODO vfs configured per host is inaccurate when hosts are configured differently
        # vfs_conf_per_host = vfs_conf/len(pci_hosts)
        # if vfs_conf_per_host < 4:
        #     skip('Less than 4 {} interfaces configured on each host'.format(vif_model))
        pci_hosts = pci_hosts[:2]
        vm_num = 4
        vfs_use_init = None
        # vm_num = min(4, int(vfs_conf_per_host / 4) * 2)

        initial_host, min_cores_per_proc = get_host_with_min_vm_cores_per_proc(
            pci_hosts)
        other_host = pci_hosts[0] if initial_host == pci_hosts[1] else pci_hosts[1]
        vm_vcpus = int(min_cores_per_proc / (vm_num / 2))

        def remove_host_from_zone():
            LOG.fixture_step(
                "Remove {} hosts from cgcsauto zone".format(vif_model))
            nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                    check_first=False)

        request.addfinalizer(remove_host_from_zone)

        LOG.fixture_step("Add {} hosts to cgcsauto zone: {}".format(
            vif_model, pci_hosts))
        nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                           hosts=pci_hosts)

        nics = get_pci_vm_nics(vif_model, pci_net_id)

        return net_type, pci_net, pci_hosts, pnet_id, nics, initial_host, other_host, vfs_use_init, vm_num, vm_vcpus
Example #6
    def get_zone(self, request, add_stxauto_zone):
        if system_helper.is_aio_simplex():
            zone = 'nova'
            return zone

        zone = 'stxauto'
        storage_backing, hosts = \
            keywords.host_helper.get_storage_backing_with_max_hosts()
        host = hosts[0]
        LOG.fixture_step('Select host {} with backing '
                         '{}'.format(host, storage_backing))
        nova_helper.add_hosts_to_aggregate(aggregate='stxauto', hosts=[host])

        def remove_hosts_from_zone():
            nova_helper.remove_hosts_from_aggregate(aggregate='stxauto',
                                                    check_first=False)

        request.addfinalizer(remove_hosts_from_zone)
        return zone
Example #7
    def add_hosts_to_zone(self, request, add_cgcsauto_zone,
                          get_hosts_per_backing):
        hosts_per_backing = get_hosts_per_backing
        avail_hosts = {
            key: vals[0]
            for key, vals in hosts_per_backing.items() if vals
        }

        if not avail_hosts:
            skip("No host in any storage aggregate")

        nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                           hosts=list(avail_hosts.values()))

        def remove_hosts_from_zone():
            nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                    check_first=False)

        request.addfinalizer(remove_hosts_from_zone)
        return avail_hosts
Example #8
    def setup_quota_and_hosts(self, request, add_admin_role_class,
                              add_cgcsauto_zone):
        vm_helper.ensure_vms_quotas(vms_num=10, cores_num=50, vols_num=20)

        storage_backing, target_hosts = host_helper.get_storage_backing_with_max_hosts(
        )
        if len(target_hosts) < 2:
            skip("Less than two up hosts have same storage backing")

        hosts_to_add = target_hosts[:2]
        nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                           hosts=hosts_to_add)

        def remove_hosts_from_zone():
            nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                    check_first=False)

        request.addfinalizer(remove_hosts_from_zone)

        return storage_backing, hosts_to_add
Example #9
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out a internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are occupied on separate hosts achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to a unknown IP and check compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate was not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure the VMs are not on the same compute; not strictly needed, but double-check:
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Stats for {} packets after ping to unknown vtep: {}".format(filter_unknown_vtep,
                                                                              stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Stats for {} packets after ping to unknown vtep: {}".format(filter_unknown_vtep,
                                                                              stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
Example #10
    def add_1g_and_4k_pages(self, request, config_host_class,
                            skip_for_one_proc, add_stxauto_zone,
                            add_admin_role_module):
        storage_backing, candidate_hosts = \
            keywords.host_helper.get_storage_backing_with_max_hosts()

        if len(candidate_hosts) < 2:
            skip("Less than two up hosts have same storage backing")

        LOG.fixture_step("Check mempage configs for hypervisors and select "
                         "host to use or configure")
        hosts_selected, hosts_to_configure = get_hosts_to_configure(
            candidate_hosts)

        if set(hosts_to_configure) != {None}:

            def _modify(host):
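                # hosts_selected[0] is configured with 2GiB of 1G pages on proc1;
                # the other host gets >=2GiB of 4K pages on proc1 instead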
                is_1g = hosts_selected.index(host) == 0
                proc1_kwargs = {'gib_1g': 2, 'gib_4k_range': (None, 2)} if \
                    is_1g else {'gib_1g': 0, 'gib_4k_range': (2, None)}
                kwargs = {'gib_1g': 0, 'gib_4k_range': (None, 2)}, proc1_kwargs

                actual_mems = host_helper._get_actual_mems(host=host)
                LOG.fixture_step("Modify {} proc0 to have 0 of 1G pages and "
                                 "<2GiB of 4K pages".format(host))
                host_helper.modify_host_memory(host,
                                               proc=0,
                                               actual_mems=actual_mems,
                                               **kwargs[0])
                LOG.fixture_step("Modify {} proc1 to have >=2GiB of {} "
                                 "pages".format(host, '1G' if is_1g else '4k'))
                host_helper.modify_host_memory(host,
                                               proc=1,
                                               actual_mems=actual_mems,
                                               **kwargs[1])

            for host_to_config in hosts_to_configure:
                if host_to_config:
                    config_host_class(host=host_to_config, modify_func=_modify)
                    LOG.fixture_step(
                        "Check mem pages for {} are modified "
                        "and updated successfully".format(host_to_config))
                    host_helper.wait_for_memory_update(host=host_to_config)

            LOG.fixture_step("Check host memories for {} after mem config "
                             "completed".format(hosts_selected))
            _, hosts_unconfigured = get_hosts_to_configure(hosts_selected)
            assert not hosts_unconfigured[0], \
                "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \
                "proc1:1g>=2,4k<2gib".format(hosts_unconfigured[0])
            assert not hosts_unconfigured[1], \
                "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \
                "proc1:1g<2,4k>=2gib".format(hosts_unconfigured[1])

        LOG.fixture_step('(class) Add hosts to stxauto aggregate: '
                         '{}'.format(hosts_selected))
        nova_helper.add_hosts_to_aggregate(aggregate='stxauto',
                                           hosts=hosts_selected)

        def remove_host_from_zone():
            LOG.fixture_step('(class) Remove hosts from stxauto aggregate: '
                             '{}'.format(hosts_selected))
            nova_helper.remove_hosts_from_aggregate(aggregate='stxauto',
                                                    check_first=False)

        request.addfinalizer(remove_host_from_zone)

        return hosts_selected, storage_backing
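
All of the fixtures above follow the same basic shape: select candidate hypervisors, add them to a host aggregate backing an availability zone with nova_helper.add_hosts_to_aggregate(), and register a finalizer that removes them again so the zone is left clean. The sketch below is a minimal, illustrative version of that shared pattern; it assumes the host_helper and nova_helper modules used throughout these examples, and the 'cgcsauto' aggregate name and two-host selection are placeholders rather than anything prescribed by the examples.

import pytest


@pytest.fixture(scope='class')
def add_hosts_to_cgcsauto(request):
    # Pick two up hypervisors to place into the zone (illustrative selection)
    hosts = host_helper.get_up_hypervisors()[:2]

    # Add the hosts to the aggregate that backs the 'cgcsauto' availability zone
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=hosts)

    # Always remove the hosts from the aggregate on teardown, even if the test fails
    def remove_hosts_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                check_first=False)

    request.addfinalizer(remove_hosts_from_zone)

    return hosts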