Example #1
def host_to_modify(request):
    """
    Select a hypervisor from existing hosts to test

    Args:
        request: pytest fixture request object

    Returns (str): hostname

    """

    target_host = host_helper.get_up_hypervisors()[0]
    original_backing = host_helper.get_host_instance_backing(host=target_host)

    # Ensure unlock attempt on target_host after running all test cases using this fixture
    HostsToRecover.add(target_host, scope='module')

    def revert_host():
        LOG.fixture_step("Revert {} storage backing to {} if needed".format(
            target_host, original_backing))
        host_helper.set_host_storage_backing(target_host,
                                             inst_backing=original_backing,
                                             check_first=True,
                                             lock=True,
                                             unlock=True)

    request.addfinalizer(revert_host)

    return target_host
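A minimal sketch of how a test might consume this fixture (the test name and
target backing are hypothetical; helper imports are assumed to match the
surrounding examples):

def test_reconfigure_host_backing(host_to_modify):
    # Change the storage backing; the fixture's finalizer reverts it
    host_helper.set_host_storage_backing(host_to_modify,
                                         inst_backing='local_image',
                                         lock=True,
                                         unlock=True)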
Example #2
def setup_host_install(request, get_patch_name):
    con_ssh = ControllerClient.get_active_controller()
    hosts = host_helper.get_up_hypervisors()
    host = hosts[-1]
    if host == system_helper.get_active_controller_name():
        host = hosts[-2]
    host_helper.lock_host(host)

    patch_name = get_patch_name
    LOG.fixture_step("Applying {} to patching controller".format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch upload test_patches/{}.patch'.format(
        patch_name))
    con_ssh.exec_sudo_cmd('sw-patch apply {}'.format(patch_name))

    def delete_patch():
        LOG.fixture_step("Removing {} from patching controller".format(
            patch_name))
        con_ssh.exec_sudo_cmd('sw-patch remove {}'.format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch delete {}'.format(patch_name))
        LOG.fixture_step("Reinstalling {} to revert the patch".format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch host-install {}'.format(host),
                              expect_timeout=timeout.CLI_TIMEOUT)
        host_helper.unlock_host(host)

    request.addfinalizer(delete_patch)
    return patch_name, host
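A possible consumer of this fixture (hypothetical test; the host-install
invocation mirrors the one in the finalizer above):

def test_patch_host_install(setup_host_install):
    patch_name, host = setup_host_install
    con_ssh = ControllerClient.get_active_controller()
    LOG.tc_step("Installing {} on locked host {}".format(patch_name, host))
    con_ssh.exec_sudo_cmd('sw-patch host-install {}'.format(host),
                          expect_timeout=timeout.CLI_TIMEOUT)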
Example #3
def pre_check(request):
    """
    This is to adjust the quota
    return: code 0/1
    """
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip('Large heat tests require 3+ hypervisors')

    # disable remote cli for these testcases
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def revert():
            ProjVar.set_var(REMOTE_CLI=remote_cli)
        request.addfinalizer(revert)

    vm_helper.set_quotas(networks=100)
    vm_helper.ensure_vms_quotas(cores_num=100, vols_num=100, vms_num=100)

    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
    request.addfinalizer(list_status)
Example #4
def pre_check(request):
    """
    This is to adjust the quota and to launch the heat stack
    return: code 0/1
    """
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip('System test heat tests require 3+ hypervisors')

    # disable remote cli for these testcases
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def revert():
            ProjVar.set_var(REMOTE_CLI=remote_cli)
        request.addfinalizer(revert)

    vm_helper.set_quotas(networks=600, ports=1000, volumes=1000, cores=1000, instances=1000, ram=7168000,
                         server_groups=100, server_group_members=1000)
    system_test_helper.launch_lab_setup_tenants_vms()

    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
        # system_test_helper.delete_lab_setup_tenants_vms()
    request.addfinalizer(list_status)
Example #5
def test_set_hosts_storage_backing_min(instance_backing, number_of_hosts):
    """
    Modify hosts' storage backing if needed so that the system has the minimum number of hosts with the given instance backing

    Args:
        instance_backing:
        number_of_hosts:

    Test Steps:
        - Calculate the hosts to be configured based on test params
        - Configure hosts to meet given criteria
        - Check number of hosts in given instance backing is as specified

    """
    LOG.tc_step("Determine the hosts to configure")
    hosts = host_helper.get_up_hypervisors()
    hosts_len = len(hosts)
    host_num_mapping = {'all': hosts_len, 'two': 2, 'one': 1}
    number_of_hosts = host_num_mapping[number_of_hosts]

    hosts_with_backing = host_helper.get_hosts_in_storage_backing(
        instance_backing)
    if len(hosts_with_backing) >= number_of_hosts:
        LOG.info("Already have {} hosts in {} backing. Do nothing".format(
            len(hosts_with_backing), instance_backing))
        return

    candidate_hosts = get_candidate_hosts(number_of_hosts=number_of_hosts)

    number_to_config = number_of_hosts - len(hosts_with_backing)
    hosts_to_config = list(set(candidate_hosts) -
                           set(hosts_with_backing))[0:number_to_config]

    LOG.tc_step(
        "Delete vms if any to prepare for system configuration change with best effort"
    )
    vm_helper.delete_vms(fail_ok=True)

    LOG.tc_step("Configure following hosts to {} backing: {}".format(
        hosts_to_config, instance_backing))
    for host in hosts_to_config:
        HostsToRecover.add(host)
        host_helper.set_host_storage_backing(host=host,
                                             inst_backing=instance_backing,
                                             unlock=False,
                                             wait_for_configured=False)

    host_helper.unlock_hosts(hosts_to_config,
                             check_hypervisor_up=True,
                             fail_ok=False)

    LOG.tc_step("Waiting for hosts in {} aggregate".format(instance_backing))
    for host in hosts_to_config:
        host_helper.wait_for_host_in_instance_backing(
            host, storage_backing=instance_backing)

    LOG.tc_step("Check number of {} hosts is at least {}".format(
        instance_backing, number_of_hosts))
    assert number_of_hosts <= len(host_helper.get_hosts_in_storage_backing(instance_backing)), \
        "Number of {} hosts is less than {} after configuration".format(instance_backing, number_of_hosts)
Example #6
    def test_evacuate_vms(self, vms_):
        """
        Test evacuation of vms
        Args:
            vms_: (fixture to create vms)

        Pre-requisites:
            - At least two up hypervisors on system

        Test Steps:
            - Create vms with various options:
                - vm booted from cinder volume,
                - vm booted from glance image,
                - vm booted from glance image, and have an extra cinder
                volume attached after launch,
                - vm booted from cinder volume with ephemeral and swap disks
            - Move vms onto same hypervisor
            - sudo reboot -f on the host
            - Ensure vms are successfully evacuated to other host
            - Live migrate vms back to original host
            - Check vms can move back, and vms are still reachable from natbox
            - Check system services are enabled and neutron agents are alive

        """
        vms, target_host = vms_

        pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
            timeout=20, fail_ok=True)
        up_hypervisors = host_helper.get_up_hypervisors()
        pre_res_neutron, pre_msg_neutron = \
            network_helper.wait_for_agents_healthy(
                up_hypervisors, timeout=20, fail_ok=True)

        LOG.tc_step(
            "reboot -f on vms host, ensure vms are successfully evacuated and "
            "host is recovered after reboot")
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               wait_for_host_up=True,
                               ping_vms=True)

        LOG.tc_step("Check rebooted host can still host vm")
        vm_helper.live_migrate_vm(vms[0], destination_host=target_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vms[0])

        LOG.tc_step("Check system services and neutron agents after {} "
                    "reboot".format(target_host))
        post_res_sys, post_msg_sys = system_helper.wait_for_services_enable(
            fail_ok=True)
        post_res_neutron, post_msg_neutron = \
            network_helper.wait_for_agents_healthy(hosts=up_hypervisors,
                                                   fail_ok=True)

        assert post_res_sys, "\nPost-evac system services stats: {}" \
                             "\nPre-evac system services stats: {}". \
            format(post_msg_sys, pre_msg_sys)
        assert post_res_neutron, "\nPost-evac neutron agents stats: {}" \
                                 "\nPre-evac neutron agents stats: {}". \
            format(post_msg_neutron, pre_msg_neutron)
Example #7
def prepare_resource(add_admin_role_module):
    hypervisor = random.choice(host_helper.get_up_hypervisors())
    flavor = nova_helper.create_flavor(name='flavor-1g',
                                       ram=1024,
                                       cleanup='module')[1]
    vol_id = cinder_helper.create_volume('vol-mem_page_size',
                                         cleanup='module')[1]
    return hypervisor, flavor, vol_id
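One way a test might unpack the returned tuple (hypothetical sketch; the
flavor and source_id keyword names are assumptions, not confirmed against
vm_helper.boot_vm):

def test_mem_page_size(prepare_resource):
    hypervisor, flavor, vol_id = prepare_resource
    vm_id = vm_helper.boot_vm(name='vm-mem_page_size',
                              flavor=flavor,        # assumed kwarg
                              source='volume',
                              source_id=vol_id,     # assumed kwarg
                              vm_host=hypervisor,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)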
Example #8
def skip_for_one_proc():
    hypervisor = host_helper.get_up_hypervisors()
    if not hypervisor:
        skip("No up hypervisor on system.")

    if len(host_helper.get_host_procs(hostname=hypervisor[0])) < 2:
        skip('At least two processor per compute host is required for this '
             'test.')
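Assuming the function above is registered as a pytest fixture (and that
`from pytest import mark` is in scope), a test can pull it in by name
(sketch; the test name is hypothetical):

@mark.usefixtures('skip_for_one_proc')
def test_needs_two_procs_per_host():
    ...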
Example #9
def check_system():
    storage_backing, hosts = host_helper.get_storage_backing_with_max_hosts()
    up_hypervisors = host_helper.get_up_hypervisors()
    if not up_hypervisors:
        skip('No up hypervisor on system')

    vm_helper.ensure_vms_quotas(vms_num=10, cores_num=20, vols_num=10)

    return hosts, storage_backing, up_hypervisors
Example #10
def check_system():
    if not cinder_helper.is_volumes_pool_sufficient(min_size=80):
        skip("Cinder volume pool size is smaller than 80G")

    if len(host_helper.get_up_hypervisors()) < 2:
        skip("at least two computes are required")

    if len(host_helper.get_storage_backing_with_max_hosts()[1]) < 2:
        skip("at least two hosts with the same storage backing are required")
Example #11
def skip_test_if_less_than_two_hosts(no_simplex):
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 2:
        skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)

    LOG.fixture_step(
        "Update instance and volume quota to at least 10 and 20 respectively")
    vm_helper.ensure_vms_quotas(vms_num=10)

    return len(hypervisors)
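A dependent test can consume the returned hypervisor count directly (sketch;
the test name is hypothetical and assumes the function above is registered
as a fixture):

def test_two_host_operation(skip_test_if_less_than_two_hosts):
    hypervisor_count = skip_test_if_less_than_two_hosts
    LOG.info("Running with {} up hypervisors".format(hypervisor_count))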
Example #12
def _test_horizon_compute_host_disable_service_negative(hypervisors_pg):
    host_name = host_helper.get_up_hypervisors()[0]
    hypervisors_pg.go_to_compute_host_tab()

    LOG.tc_step('Disable service of the host {}'.format(host_name))
    hypervisors_pg.disable_service(host_name)

    LOG.tc_step('Verify there is an error message for host {}'.format(host_name))
    assert hypervisors_pg.find_message_and_dismiss(messages.ERROR)
    horizon.test_result = True
Example #13
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate that the inventory summary persists over a reboot of a controller, compute, or storage host

    Test Steps:
        - capture inventory summary via system service-list and neutron agent-list
        - reboot the target host
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if vm_helper.get_vm_host(vm_id) != host:
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
Example #14
def add_host_to_zone(request, add_cgcsauto_zone, add_admin_role_module):
    nova_zone_hosts = host_helper.get_up_hypervisors()
    host_to_add = nova_zone_hosts[0]
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=host_to_add)

    def remove_host_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                check_first=False)

    request.addfinalizer(remove_host_from_zone)

    return host_to_add
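A sketch of a test pinning a vm to the cgcsauto zone via this fixture (the
test name and assertion are hypothetical; avail_zone usage mirrors Example
#28 below):

def test_boot_vm_in_cgcsauto_zone(add_host_to_zone):
    target_host = add_host_to_zone
    vm_id = vm_helper.boot_vm(name='vm-cgcsauto',
                              avail_zone='cgcsauto',
                              cleanup='function')[1]
    assert vm_helper.get_vm_host(vm_id) == target_host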
Example #15
def pre_configs(request):
    """
    Dovetail test fixture
    Args:
        request:

    - configure sshd_config on tis hosts to allow root access
    - update conf files on dovetail test node on cumulus

    """
    if not ComplianceVar.get_var('DOVETAIL_SUITE'):
        skip('--dovetail-suite unspecified.')

    try:
        import yaml
    except ImportError:
        skip('pyyaml package is not installed.')

    computes = host_helper.get_up_hypervisors()
    if len(computes) < 2:
        skip('Less than 2 computes in available states')

    active, standby = system_helper.get_active_standby_controllers()
    if not standby:
        skip('No standby controller on system')

    LOG.fixture_step(
        "Ensure dovetail test node mgmt nic connects to lab under test")
    compliance_helper.update_dovetail_mgmt_interface()

    controllers = [active, standby]
    storages = system_helper.get_hosts(personality='storage',
                                       availability=HostAvailState.AVAILABLE)
    hosts_dict = {
        'controller': controllers,
        'compute': computes,
        'storage': storages
    }
    all_hosts = list(set(controllers + computes + storages))

    LOG.fixture_step(
        "Enable port_security for the system and update existing networks")
    port_security = network_helper.get_network_values(
        'external-net0', 'port_security_enabled')[0]
    port_security = (str(port_security).lower() == 'true')
    if not port_security:
        system_helper.add_ml2_extension_drivers(drivers='port_security')
        networks = network_helper.get_networks(auth_info=Tenant.get('admin'))
        for net in networks:
            network_helper.set_network(net_id=net, enable_port_security=True)

    configure_tis(all_hosts, request=request)
    configure_dovetail_server(hosts_per_personality=hosts_dict)
Example #16
def hosts_pci_device_info():
    # get lab host list
    actual_hosts_device_info = {}
    compute_hosts = host_helper.get_up_hypervisors()
    for host in compute_hosts:
        device_info = host_helper.get_host_pci_devices(host, dev_class='Co-processor')
        if device_info:
            actual_hosts_device_info[host] = device_info
    LOG.info("Hosts device info: {}".format(actual_hosts_device_info))

    if not actual_hosts_device_info:
        skip("co-processor PCI device not found")

    hosts_device_info = {}
    sys_host_fields = ('address', 'name', 'vendor id', 'device id')
    for host in actual_hosts_device_info:
        sys_devs = host_helper.get_host_devices(host, field=sys_host_fields)
        actual_pci_devs = actual_hosts_device_info[host]
        hosts_device_info[host] = []
        for dev_info in actual_pci_devs:
            actual_pci_addr, actual_vendor_name, actual_dev_name, vf_dev_id, vf_count = dev_info
            actual_pci_addr = '0000:{}'.format(actual_pci_addr)
            assert actual_pci_addr in sys_devs[0], "Existing Co-processor pci device is not " \
                                                   "listed in system host-device-list"

            hosts_with_dev = [host_ for host_, devs_ in actual_hosts_device_info.items() if
                              actual_dev_name in [dev[2] for dev in devs_]]
            if len(hosts_with_dev) < len(actual_hosts_device_info):
                LOG.info('QAT dev {} is only configured on {}'.format(actual_dev_name,
                                                                      hosts_with_dev))
                continue

            dev_name = actual_dev_name.split(maxsplit=1)[0].lower()
            index = sys_devs[0].index(actual_pci_addr)
            pci_addr, name, vendor_id, device_id = list(zip(*sys_devs))[index]
            dev_info_dict = {'pci_address': pci_addr,
                             'pci_name': name,
                             'vendor_id': vendor_id,
                             'device_id': device_id,
                             'vf_device_id': vf_dev_id,
                             'vf_count': vf_count,
                             'pci_alias': 'qat-{}-vf'.format(dev_name),
                            }

            hosts_device_info[host].append(dev_info_dict)

    hosts_device_info = {k: v for k, v in hosts_device_info.items() if v}
    if not hosts_device_info:
        skip('No common QAT device configured on computes. Skip test.')

    LOG.info('QAT devices to use for test: {}'.format(hosts_device_info))
    vm_helper.ensure_vms_quotas(vms_num=20)
    return hosts_device_info
Example #17
def pb_migrate_test(backup_info, con_ssh, vm_ids=None):
    """
    Run migration test before doing system backup.

    Args:
        backup_info:
            - options for doing backup

        con_ssh:
            - current ssh connection

        vm_ids:
            - list of vm ids to randomly choose from for migration

    Return:
        None (0 is returned early if there are fewer than 2 hypervisors)
    """

    hypervisors = host_helper.get_up_hypervisors(con_ssh=con_ssh)
    if len(hypervisors) < 2:
        LOG.info(
            'Only {} hypervisors, not enough to test migration'.format(
                len(hypervisors)))
        LOG.info('Skip migration test')
        return 0
    else:
        LOG.debug('There are {} hypervisors'.format(len(hypervisors)))

    LOG.info('Randomly choose some VMs and do migrate:')

    target = random.choice(vm_ids)
    LOG.info('-OK, test migration of VM:{}'.format(target))

    original_host = vm_helper.get_vm_host(target)
    LOG.info('Original host:{}'.format(original_host))

    vm_helper.live_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After live-migration, host:{}'.format(current_host))

    if original_host == current_host:
        LOG.info('backup_info:{}'.format(backup_info))
        LOG.warn(
            'VM is still on its original host, live-migration failed? original host:{}'
            .format(original_host))

    original_host = current_host
    vm_helper.cold_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After cold-migration, host:{}'.format(current_host))
    if original_host == current_host:
        LOG.warn(
            'VM is still on its original host, cold-migration failed? original host:{}'
            .format(original_host))
Example #18
def test_evacuate_dpdk_and_vhost_vms(add_admin_role_func):
    """
    Skip:
        - Less than 2 up hypervisors with same storage config available on system
    Setups:
        - Add admin role to tenant user under test
    Test Steps:
        - Launch 3 vms on same host with following configs:
            - dpdk vm with 2 vcpus
            - vhost vm with 2 vcpus
            - vhost vm with 3 vcpus
        - sudo reboot -f on vm host
        - Check vms are moved to other host, in active state, and are pingable after evacuation
    Teardown:
        - Remove admin role from tenant user
        - Wait for failed host to recover
        - Delete created vms
    """
    hosts = host_helper.get_up_hypervisors()
    if len(hosts) < 2:
        skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)

    LOG.tc_step("Boot an observer VM")
    vm_observer = launch_vm(vm_type='dpdk', num_vcpu=2, host=hosts[1])
    vm_helper.setup_avr_routing(vm_observer)

    LOG.tc_step("Launch dpdk and vhost vms")
    vms = []
    vm_host = hosts[0]
    for vm_info in (('dpdk', 3), ('vhost', 2), ('vhost', 3)):
        vm_type, num_vcpu = vm_info
        vm_id = launch_vm(vm_type=vm_type, num_vcpu=num_vcpu, host=vm_host)
        vm_helper.setup_avr_routing(vm_id, vm_type=vm_type)
        vms.append(vm_id)

    LOG.tc_step(
        "Ensure dpdk and vhost vms interfaces are reachable before evacuate")
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vms, ping_vms=True)
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)
Example #19
def _get_nova_alias(class_id, dev_type, regex=False):
    hosts = host_helper.get_up_hypervisors()
    devices = keywords.host_helper.get_host_devices(host=hosts[0],
                                                    field='address',
                                                    list_all=True,
                                                    regex=regex,
                                                    **{'class id': class_id})
    dev_len = min(len(devices), 2)
    devices = devices[:dev_len]

    nova_devices = network_helper.create_pci_alias_for_devices(
        dev_type=dev_type, devices=devices)
    nova_alias = nova_devices[0]['pci alias']
    LOG.info("nova alias name {}".format(nova_alias))
    return nova_alias
Example #20
    def pci_dev_numa_nodes(self, vif_model_check):
        vif_model = vif_model_check[0]
        hosts = host_helper.get_up_hypervisors()
        hosts_pci_numa = network_helper.get_pci_device_numa_nodes(hosts)
        hosts_pciif_procs = network_helper.get_pci_procs(hosts,
                                                         net_type=vif_model)

        # Get number of hosts that has pcipt/sriov interface on same numa node as pci device
        numa_match = 0
        for host_ in hosts:
            LOG.info('PCI_NUMA_{}: {}; PCIIF_PROCS_{}: {}'.format(
                host_, hosts_pci_numa[host_], host_, hosts_pciif_procs[host_]))
            if set(hosts_pci_numa[host_]).intersection(
                    set(hosts_pciif_procs[host_])):
                numa_match += 1
                if numa_match == 2:
                    break

        return numa_match
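Within the same test class, the returned count is typically used as a skip
condition (sketch; the threshold and message are hypothetical):

        numa_match = self.pci_dev_numa_nodes(vif_model_check)
        if numa_match < 2:
            skip('Fewer than 2 hosts have a {} interface on the same NUMA '
                 'node as a PCI device'.format(vif_model_check[0]))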
Example #21
def check_system():
    LOG.info("Getting host list")
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip("Less than 3 hypervisors on system. Skip the test.")

    LOG.info("check if the lab has vxlan enabled network")
    providernets = system_helper.get_data_networks(field='name',
                                                   network_type='vxlan')
    if not providernets:
        skip("Vxlan provider-net not configured")

    for pnet in providernets:
        internal_nets = network_helper.get_networks_on_providernet(
            providernet=pnet, strict=False, name='internal')
        if internal_nets:
            break
    else:
        skip('Internal nets are not configured with vxlan.')
    return hypervisors
Example #22
def get_candidate_hosts(number_of_hosts):

    candidate_hosts = host_helper.get_up_hypervisors()
    hosts_len = len(candidate_hosts)

    if hosts_len < number_of_hosts:
        # configure down hosts as well in case not enough up hosts available
        extra_num = number_of_hosts - hosts_len
        down_hosts = host_helper.get_hypervisors(state='down')
        assert len(down_hosts) >= extra_num, \
            "Less than {} hypervisors on system to" \
            " configure".format(number_of_hosts)
        candidate_hosts += down_hosts[:extra_num]

    # Following assert should never fail, otherwise automation code needs
    # to be checked
    assert len(candidate_hosts) >= number_of_hosts, \
        "Not enough hosts available for configuration."

    return candidate_hosts
Example #23
def _test_set_cpu_cores_denied_unlocked_host():
    nova_hosts = host_helper.get_up_hypervisors()

    assert nova_hosts, "No nova host is up."

    LOG.tc_step("Verify host-cpu-modify is rejected if host is unlocked.")
    for host in nova_hosts:
        code, msg = host_helper.modify_host_cpu(host, 'vswitch', p0=1, fail_ok=True)
        assert 1 == code, "modify host cpu CLI is not rejected with return code 1."
        assert 'Host must be locked' in msg

        LOG.tc_step("Verify one ore more cpu cores are assigned to Platform and vSwitch.")
        table_ = table_parser.table(cli.system('host-cpu-list', host)[1])

        platform_cores = table_parser.get_values(table_, 'log_core', assigned_function='Platform')

        assert len(platform_cores) >= 1, "At least one core should be assigned to Platform"

        vswitch_cores = table_parser.get_values(table_, 'log_core', assigned_function='vSwitch')
        assert len(vswitch_cores) >= 1, "At least one core should be assigned to vSwitch"
Example #24
def test_apply_storage_profile_negative(create_storage_profile, personality):

    if personality == 'controller':
        host_name = system_helper.get_standby_controller_name()
        assert host_name, "No standby controller available on system"
    else:
        host_name = host_helper.get_up_hypervisors()[0]

    # For storage systems, skip test if ceph isn't healthy
    if len(system_helper.get_storage_nodes()) > 0:
        ceph_healthy = storage_helper.is_ceph_healthy()
        if not ceph_healthy:
            skip('Skipping due to ceph not being healthy')

    profile_name = create_storage_profile['profile_name']
    origin_disk_num = create_storage_profile['disk_num']
    disks_num = len(storage_helper.get_host_disks(host_name, 'device_node'))

    expt_err_list = [
        "Please check if host's disks match profile criteria",
        "Failed to create storage function. Host personality must be 'storage'",
    ]
    if disks_num < origin_disk_num - 1:
        expt_err_list.append("profile has more disks than host does")

    positional_arg = host_name + ' ' + profile_name

    HostsToRecover.add(host_name)
    host_helper.lock_host(host_name, swact=True)
    exitcode, output = cli.system('host-apply-storprofile',
                                  positional_arg,
                                  fail_ok=True)
    host_helper.unlock_host(host_name)

    assert exitcode == 1 and any(expt in output for expt in expt_err_list)
Example #25
def test_compute_mempage_vars(hosts=None):
    """
    Steps:
        - Collect host mempage stats from system host-memory-list
        - Ensure the stats collected are reflected in following places:
            - nova hypervisor-show
            - /sys/devices/system/node/node*/hugepages/ on compute host

    Args:
        hosts (str|None|list|tuple): this param is reserved if any test wants to call this as
            verification step

    """
    if isinstance(hosts, str):
        hosts = [hosts]
    elif not hosts:
        hosts = host_helper.get_up_hypervisors()

    if not hosts:
        skip('No hosts available')

    LOG.info("---Wait for system host-memory-list updated for a random host")
    host_helper.wait_for_memory_update(hosts[0])

    for host in hosts:
        LOG.info(
            "---Check {} memory info in system host-memory-list and on host".
            format(host))
        headers = [
            'vs_hp_size(MiB)', 'vs_hp_total', 'app_total_4K',
            'app_hp_total_2M', 'app_hp_total_1G', 'app_hp_avail_2M',
            'app_hp_avail_1G'
        ]
        cli_vars = check_meminfo_via_sysinv_nova_cli(host=host,
                                                     headers=headers)
        check_memconfs_on_host(host=host, cli_vars=cli_vars)
Example #26
    def skip_test_if_less_than_two_hosts(self):
        if len(host_helper.get_up_hypervisors()) < 2:
            skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)
Example #27
def get_target_host():
    host = host_helper.get_up_hypervisors()[0]
    return host
Example #28
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out a internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are occupied on separate hosts achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to a unknown IP and check compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured, configured on more than one "
             "provider net, or not configured on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm-occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate is not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure the vms are not on the same compute (double checking):
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert 0, "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
Example #29
def test_swact_controllers(stx_openstack_required,
                           wait_for_con_drbd_sync_complete):
    """
    Verify swact active controller

    Test Steps:
        - Boot a vm on system and check ping works
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify vm is still pingable

    """
    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('Retrieve active and standby controllers')
    pre_active_controller, pre_standby_controller = \
        system_helper.get_active_standby_controllers()
    assert pre_standby_controller, "No standby controller available"

    pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
        timeout=20, fail_ok=True)
    up_hypervisors = host_helper.get_up_hypervisors()
    pre_res_neutron, pre_msg_neutron = network_helper.wait_for_agents_healthy(
        up_hypervisors, timeout=20, fail_ok=True)

    LOG.tc_step("Boot a vm from image and ping it")
    vm_id_img = vm_helper.boot_vm(name='swact_img',
                                  source='image',
                                  cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img)

    LOG.tc_step("Boot a vm from volume and ping it")
    vm_id_vol = vm_helper.boot_vm(name='swact', cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol)

    LOG.tc_step(
        "Swact active controller and ensure active controller is changed")
    host_helper.swact_host(hostname=pre_active_controller)

    LOG.tc_step("Verify standby controller and active controller are swapped")
    post_active_controller = system_helper.get_active_controller_name()
    post_standby_controller = system_helper.get_standby_controller_name()

    assert pre_standby_controller == post_active_controller, \
        "Prev standby: {}; Post active: {}".format(
            pre_standby_controller, post_active_controller)
    assert pre_active_controller == post_standby_controller, \
        "Prev active: {}; Post standby: {}".format(
            pre_active_controller, post_standby_controller)

    LOG.tc_step("Check boot-from-image vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img, timeout=30)
    LOG.tc_step("Check boot-from-volume vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol, timeout=30)

    LOG.tc_step(
        "Check system services and neutron agents after swact from {}".format(
            pre_active_controller))
    post_res_sys, post_msg_sys = system_helper.wait_for_services_enable(
        fail_ok=True)
    post_res_neutron, post_msg_neutron = network_helper.wait_for_agents_healthy(
        hosts=up_hypervisors, fail_ok=True)

    assert post_res_sys, \
        "\nPost-evac system services stats: {}\nPre-evac system services stats: {}". \
        format(post_msg_sys, pre_msg_sys)
    assert post_res_neutron, \
        "\nPost-evac neutron agents stats: {}\nPre-evac neutron agents stats: {}". \
        format(post_msg_neutron, pre_msg_neutron)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller,
                                            pre_standby_controller),
                                     timeout=30)
Example #30
def check_numa_num():
    hypervisor = host_helper.get_up_hypervisors()
    if not hypervisor:
        skip("No up hypervisor on system.")

    return len(host_helper.get_host_procs(hostname=hypervisor[0]))
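A caller might use the returned processor count as a skip condition (sketch;
the threshold and message are hypothetical):

proc_num = check_numa_num()
if proc_num < 2:
    skip('At least two processors per host are required for this test.')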