Example #1
def test_vswitch_ports_cores_mapping():
    if not system_helper.is_avs():
        skip("vshell commands unsupported by OVS")

    up_hypervisors = host_helper.get_hypervisors(state='up')
    assert up_hypervisors, "No hypervisor is up."

    for host in up_hypervisors:
        LOG.tc_step("Find out expected port-engine mapping for {} via vshell port/engine-list".format(host))

        check_helper.check_host_vswitch_port_engine_map(host)
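The helper call above hides the actual comparison. A minimal sketch of the idea, assuming the `vshell port-list` and `vshell engine-list` commands named in the test step produce table-like output (the real parsing and assertions live in check_helper.check_host_vswitch_port_engine_map):

from keywords import host_helper

def sketch_port_engine_outputs(host):
    # Illustrative only: collect the raw outputs the real check parses.
    # The exact format of the vshell tables is an assumption here.
    with host_helper.ssh_to_host(host) as node_ssh:
        ports = node_ssh.exec_cmd(cmd='vshell port-list', fail_ok=False,
                                  get_exit_code=False)[1]
        engines = node_ssh.exec_cmd(cmd='vshell engine-list', fail_ok=False,
                                    get_exit_code=False)[1]
    # A full implementation would map each port to the engine (core) that
    # services it and assert the mapping matches the host's vswitch config.
    return ports, engines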
Example #2
def get_suitable_hypervisors():
    """
    Get low latency hypervisors with HT-off

    TODO: the following settings should be checked, but most of them cannot be easily done automatically
    # Processor Configuration
    # Hyper-Threading = Disabled
    # Power & Performance
    # Policy = Performance
    # Workload = Balanced
    # P-States
    # SpeedStep = Enabled
    # Turbo Boost = Enabled
    # Energy Efficient Turbo = Disabled
    # C-States
    # CPU C-State = Disabled
    # Acoustic and Performance
    # Fan Profile = Performance

    """
    global testable_hypervisors

    LOG.fixture_step(
        'Check if the lab meets conditions required by this test case')
    hypervisors = host_helper.get_hypervisors()

    for hypervisor in hypervisors:
        personality, subfunc = system_helper.get_host_values(
            hypervisor, ('personality', 'subfunctions'))
        personalities = subfunc + personality
        if not personalities or 'lowlatency' not in personalities:
            continue

        cpu_info, num_threads, vm_cores, num_cores = get_cpu_info(hypervisor)
        if cpu_info and 'topology' in cpu_info and cpu_info['topology'][
                'threads'] == 1:
            if num_threads != 1:
                LOG.warn(
                    'conflicting info: num_threads={}, while cpu_info.threads={}'
                    .format(num_threads, cpu_info['topology']['threads']))
            testable_hypervisors[hypervisor] = {
                'personalities': personalities,
                'cpu_info': cpu_info,
                'vm_cores': vm_cores,
                'num_cores': num_cores,
                'for_host_test': False,
                'for_vm_test': False,
            }
        else:
            LOG.warning(
                'hypervisor:{} has HT-on, ignore it'.format(hypervisor))

    return testable_hypervisors.keys()
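The TODO above lists BIOS settings that are hard to verify automatically; hyper-threading, at least, can also be cross-checked on a host directly. A stdlib-only sketch (not one of the helpers used above) that infers HT state from lscpu:

import subprocess

def ht_enabled_locally():
    """Return True if lscpu reports more than one thread per core.

    Stand-alone sketch; the fixture above relies on get_cpu_info() and
    cpu_info['topology']['threads'] instead of running lscpu itself.
    """
    out = subprocess.run(['lscpu'], capture_output=True, text=True,
                         check=True).stdout
    for line in out.splitlines():
        if line.startswith('Thread(s) per core:'):
            return int(line.split(':', 1)[1].strip()) > 1
    raise RuntimeError('lscpu did not report Thread(s) per core')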
Example #3
def test_ceilometer_meters_exist(meters):
    """
    Validate ceilometer meters exist
    Verification Steps:
    1. Check via 'openstack metric list' or 'ceilometer event-list'
    2. Check that meters for router, subnet, image, and vswitch exist
    """
    # skip('CGTS-10102: Disable TC until US116020 completes')
    time_create = system_helper.get_host_values('controller-1',
                                                'created_at')[0]
    current_isotime = datetime.utcnow().isoformat(sep='T')

    if common.get_timedelta_for_isotimes(
            time_create, current_isotime) > timedelta(hours=24):
        skip("Over a day since install. Meters no longer exist.")

    # Check meter for routers
    LOG.tc_step(
        "Check number of 'router.create.end' events is at least the number of existing routers"
    )
    routers = network_helper.get_routers()
    router_id = routers[0]
    check_event_in_tenant_or_admin(resource_id=router_id,
                                   event_type='router.create.end')

    # Check meter for subnets
    LOG.tc_step(
        "Check number of 'subnet.create' meters is at least the number of existing subnets"
    )
    subnets = network_helper.get_subnets(
        name=Tenant.get_primary().get('tenant'), strict=False)
    subnet = random.choice(subnets)
    LOG.info("Subnet to check in ceilometer event list: {}".format(subnet))
    check_event_in_tenant_or_admin(resource_id=subnet,
                                   event_type='subnet.create.end')

    # Check meter for image
    LOG.tc_step('Check meters for image')
    images = glance_helper.get_images(field='id')
    resource_ids = gnocchi_helper.get_metrics(metric_name='image.size',
                                              field='resource_id')
    assert set(images) <= set(resource_ids)

    # Check meter for vswitch
    LOG.tc_step('Check meters for vswitch')
    resource_ids = gnocchi_helper.get_metrics(
        metric_name='vswitch.engine.util', fail_ok=True, field='resource_id')
    if system_helper.is_avs():
        hypervisors = host_helper.get_hypervisors()
        assert len(hypervisors) <= len(resource_ids), \
            "Each nova hypervisor should have at least one vSwitch core"
    else:
        assert not resource_ids, "vswitch meters found for STX build"
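The 24-hour gate near the top of the test delegates the time comparison to common.get_timedelta_for_isotimes(); a stdlib sketch of the same check, assuming both values are naive ISO-8601 timestamps in UTC (as datetime.utcnow().isoformat() produces):

from datetime import datetime, timedelta

def over_a_day(created_at_iso, now_iso):
    # Sketch of the install-age gate; assumes both inputs are naive
    # ISO-8601 UTC timestamps.
    created = datetime.fromisoformat(created_at_iso)
    now = datetime.fromisoformat(now_iso)
    return (now - created) > timedelta(hours=24)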
Example #4
def launch_instances(create_flavors_and_images, create_network_performance):
    LOG.fixture_step("Creating instances")
    net_id_list = list()
    net_id_list.append({"net-id": create_network_performance[0]})
    host = host_helper.get_hypervisors()[0]
    vm_id = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                              nics=net_id_list,
                              source="image",
                              source_id=create_flavors_and_images["image"],
                              vm_host=host,
                              cleanup="module")[1]
    # TODO check power state RUNNING?
    return vm_id
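The TODO could be closed with a follow-up check after boot. A hedged sketch, reusing vm_helper.get_vm_values() as seen in the later examples; the 'OS-EXT-STS:power_state' field name is an assumption, not confirmed by these snippets:

def assert_vm_running(vm_id):
    # Hypothetical field name; adjust to whatever get_vm_values() exposes.
    power_state = vm_helper.get_vm_values(vm_id=vm_id,
                                          fields='OS-EXT-STS:power_state')[0]
    assert str(power_state).lower() in ('1', 'running'), \
        "VM {} is not running, power_state={}".format(vm_id, power_state)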
def prepare_hosts(request):
    """
    Setup:
        Attempt to convert all computes to expected storage backing.
        Skip test if unsuccessful.

    Args:
        request: expected host storage backing to run the test

    Returns: hosts storage backing

    Teardown:
        Restore hosts to original state
    """
    expected_storage_backing = request.param
    avail_hosts = host_helper.get_hosts_in_storage_backing(
        storage_backing=expected_storage_backing)
    all_hosts = host_helper.get_hypervisors()
    modified_hosts = {}
    locked_hosts = []
    avail_num = len(avail_hosts)

    # Try to convert all available hypervisor hosts to the expected storage backing
    for host in all_hosts:
        if host not in avail_hosts:
            original_storage = host_helper.get_host_instance_backing(host)
            return_code, msg = host_helper.set_host_storage_backing(
                host=host, inst_backing=expected_storage_backing, fail_ok=True)
            if return_code == 0:
                avail_num += 1
                modified_hosts[host] = original_storage
            elif return_code == 1:  # Host locked, but cannot modify to the expected storage backing
                locked_hosts.append(host)
            else:
                skip("Host {} cannot be locked. Error: {}".format(host, msg))

    # Skip test if config failed
    if avail_num < 2:
        skip("Less than two hosts are successfully modified to {} backing".
             format(expected_storage_backing))

    # Teardown to restore hosts to original storage backing
    def restore_hosts():
        LOG.debug("Modifying hosts backing to original states..")
        host_helper.unlock_hosts(locked_hosts)
        for host in modified_hosts:
            host_helper.set_host_storage_backing(host, modified_hosts[host])

    request.addfinalizer(restore_hosts)  # pass the callable itself; calling it here would run the teardown immediately

    return request.param
Example #6
def test_dead_office_recovery(reserve_unreserve_all_hosts_module):
    """
    Test dead office recovery with vms
    Args:
        reserve_unreserve_all_hosts_module: test fixture to reserve unreserve all vlm nodes for lab under test

    Setups:
        - Reserve all nodes in vlm

    Test Steps:
        - Boot 5 vms with various boot_source, disks, etc and ensure they can be reached from NatBox
        - Power off all nodes in vlm using multi-processing to simulate a power outage
        - Power on all nodes
        - Wait for nodes to become online/available
        - Check vms are recovered after hosts come back up and vms can be reached from NatBox

    """
    LOG.tc_step("Boot 5 vms with various boot_source, disks, etc")
    vms = vm_helper.boot_vms_various_types()

    hosts = system_helper.get_hosts()
    hosts_to_check = system_helper.get_hosts(availability=['available', 'online'])

    LOG.info("Online or Available hosts before power-off: {}".format(hosts_to_check))
    LOG.tc_step("Powering off hosts in multi-processes to simulate power outage: {}".format(hosts))
    region = None
    if ProjVar.get_var('IS_DC'):
        region = ProjVar.get_var('PRIMARY_SUBCLOUD')

    try:
        vlm_helper.power_off_hosts_simultaneously(hosts, region=region)
    finally:
        LOG.tc_step("Wait for 60 seconds and power on hosts: {}".format(hosts))
        time.sleep(60)
        LOG.info("Hosts to check after power-on: {}".format(hosts_to_check))
        vlm_helper.power_on_hosts(hosts, reserve=False, reconnect_timeout=HostTimeout.REBOOT+HostTimeout.REBOOT,
                                  hosts_to_check=hosts_to_check, region=region)

    LOG.tc_step("Check vms are recovered after dead office recovery")
    vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm, timeout=VMTimeout.DHCP_RETRY)
    computes = host_helper.get_hypervisors()
    if len(computes) >= 4:
        system_helper.wait_for_alarm(alarm_id=EventLogID.MULTI_NODE_RECOVERY, timeout=120)
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.MULTI_NODE_RECOVERY, check_interval=60, timeout=1200)
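The simultaneous power-off named in the test steps is delegated to vlm_helper; a stdlib sketch of the same fan-out pattern, where power_off is a hypothetical callable that powers off a single node:

from multiprocessing import Pool

def power_off_simultaneously(nodes, power_off, workers=None):
    # Sketch only: the test uses vlm_helper.power_off_hosts_simultaneously().
    with Pool(processes=workers or len(nodes)) as pool:
        pool.map(power_off, nodes)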
def create_instances(create_flavors_and_images, create_network_performance):
    LOG.fixture_step("Creating instances")
    net_id_list = list()
    net_id_list.append({"net-id": create_network_performance[0]})
    host = host_helper.get_hypervisors()[1]
    vm_id_1 = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                                nics=net_id_list, source="image",
                                source_id=create_flavors_and_images["image"],
                                vm_host=host, cleanup="module")[1]
    vm_id_2 = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                                nics=net_id_list, source="image",
                                source_id=create_flavors_and_images["image"],
                                vm_host=host, cleanup="module")[1]
    vm_ip_1 = vm_helper.get_vm_values(vm_id=vm_id_1, fields='addresses')[0].split("=")[1]
    vm_ip_2 = vm_helper.get_vm_values(vm_id=vm_id_2, fields='addresses')[0].split("=")[1]
    return {"vm_id_1": vm_id_1,
            "vm_id_2": vm_id_2,
            "vm_ip_1": vm_ip_1,
            "vm_ip_2": vm_ip_2}
Example #8
def get_candidate_hosts(number_of_hosts):

    candidate_hosts = host_helper.get_up_hypervisors()
    hosts_len = len(candidate_hosts)

    if hosts_len < number_of_hosts:
        # configure down hosts as well in case there are not enough up hosts available
        extra_num = number_of_hosts - hosts_len
        down_hosts = host_helper.get_hypervisors(state='down')
        assert len(down_hosts) >= extra_num, \
            "Less than {} hypervisors on system to" \
            " configure".format(number_of_hosts)
        candidate_hosts += down_hosts[:extra_num]

    # Following assert should never fail, otherwise automation code needs
    # to be checked
    assert len(candidate_hosts) >= number_of_hosts, \
        "Not enough hosts available for configuration."

    return candidate_hosts
Example #9
def ovs_dpdk_1_core():
    LOG.fixture_step("Review the ovs-dpdk vswitch be in just 1 core")
    vswitch_type = "ovs-dpdk"
    cpu_function = "vswitch"
    proc = "0"
    host_list = host_helper.get_hypervisors()
    for host in host_list:
        with host_helper.ssh_to_host(host) as node_ssh:
            cmd = "cat /proc/meminfo | grep Hugepagesize | awk '{print $2}'"
            hp = int(
                node_ssh.exec_cmd(cmd=cmd, fail_ok=False,
                                  get_exit_code=False)[1])
        mem = host_helper.get_host_memories(
            host=host,
            headers=("app_hp_avail_2M", "app_hp_avail_1G", "mem_avail(MiB)",
                     "vs_hp_total"))
        if hp == 1048576:
            if int(mem[proc][3]) < 2 or mem[proc][1] < 10:
                HostsToRecover.add(hostnames=host, scope="module")
                host_helper.lock_host(host=host)
                if int(mem[proc][3]) < 2:
                    args = ' -f vswitch -1G {} {} {}'.format(2, host, proc)
                    cli.system('host-memory-modify', args)
                    host_helper.modify_host_cpu(host=host,
                                                cpu_function=cpu_function,
                                                **{"p{}".format(proc): 1})
                    # TODO: maybe find a better option than sleep since we can't wait for applying
                    # container_helper.wait_for_apps_status(apps='stx-openstack',
                    #                                       status=AppStatus.APPLYING)
                    time.sleep(60)
                    container_helper.wait_for_apps_status(
                        apps='stx-openstack',
                        status=AppStatus.APPLIED,
                        check_interval=30)
                if mem[proc][1] < 10:
                    args = ' -1G {} {} {}'.format(10, host, proc)
                    cli.system('host-memory-modify', args)
                host_helper.unlock_host(host=host)
        if hp == 2048:
            if int(mem[proc][3]) < 512 or mem[proc][0] < 2500:
                host_helper.lock_host(host=host)
                if int(mem[proc][3]) < 512:
                    system_helper.modify_system(
                        **{"vswitch_type": vswitch_type})
                    vswitch_args = ' -f vswitch -2M {} {} {}'.format(
                        512, host, proc)
                    cli.system('host-memory-modify', vswitch_args)
                    host_helper.modify_host_cpu(host=host,
                                                cpu_function=cpu_function,
                                                **{"p{}".format(proc): 1})
                    # TODO: maybe find a better option than sleep since we can't wait for applying
                    # container_helper.wait_for_apps_status(apps='stx-openstack',
                    #                                     status=AppStatus.APPLIED)
                    time.sleep(60)
                    container_helper.wait_for_apps_status(
                        apps='stx-openstack',
                        status=AppStatus.APPLIED,
                        check_interval=30)
                if mem[proc][0] < 2500:
                    args = ' -2M {} {} {}'.format(2500, host, proc)
                    cli.system('host-memory-modify', args)
                host_helper.unlock_host(host=host)

        test_table = host_helper.get_host_cpu_list_table(host=host)
        curr_assigned_function_list = table_parser.get_values(
            test_table, "assigned_function")
        assert "vSwitch" in curr_assigned_function_list
def less_than_two_hypervisors():
    return len(host_helper.get_hypervisors()) < 2
Example #11
def __remove_or_add_hosts_in_aggregate(aggregate,
                                       hosts=None,
                                       remove=False,
                                       check_first=True,
                                       fail_ok=False,
                                       con_ssh=None,
                                       auth_info=Tenant.get('admin')):
    """
    Remove/Add hosts from/to given aggregate

    Args:
        aggregate (str): name of the aggregate to add/remove hosts. cgcsauto aggregate can be added
            via add_cgcsauto_zone session fixture
        hosts (list|str):
        remove (bool): True if remove hosts from given aggregate, otherwise add hosts to aggregate
        check_first (bool):
        fail_ok (bool):
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (tuple):
        (0, "Hosts successfully removed from aggregate")
        (1, <stderr>)       cli rejected on at least one host
        (2, "Host(s) still exist in aggregate <aggr> after aggregate-remove-host: <unremoved_hosts>)

    """
    hosts_in_aggregate = get_hosts_in_aggregate(aggregate, con_ssh=con_ssh)

    if hosts is None:
        if remove:
            hosts = hosts_in_aggregate
        else:
            from keywords import host_helper
            hosts = host_helper.get_hypervisors()

    if isinstance(hosts, str):
        hosts = [hosts]

    msg_str = 'Remov' if remove else 'Add'
    LOG.info("{}ing hosts {} in aggregate {}".format(msg_str, hosts,
                                                     aggregate))
    if check_first:
        if remove:
            hosts_to_rm_or_add = list(set(hosts) & set(hosts_in_aggregate))
        else:
            hosts_to_rm_or_add = list(set(hosts) - set(hosts_in_aggregate))
    else:
        hosts_to_rm_or_add = list(hosts)

    if not hosts_to_rm_or_add:
        warn_str = 'No' if remove else 'All'
        msg = "{} given host(s) in aggregate {}. Do nothing. Given hosts: {}; hosts in " \
              "aggregate: {}".format(warn_str, aggregate, hosts, hosts_in_aggregate)
        LOG.warning(msg)
        return -1, msg

    failed_res = {}
    cmd = 'aggregate remove host' if remove else 'aggregate add host'
    for host in hosts_to_rm_or_add:
        args = '{} {}'.format(aggregate, host)
        code, output = cli.openstack(cmd,
                                     args,
                                     ssh_client=con_ssh,
                                     fail_ok=True,
                                     auth_info=auth_info)
        if code > 0:
            failed_res[host] = output

    if failed_res:
        err_msg = "'{}' is rejected for following host(s) in aggregate {}: {}".format(
            cmd, aggregate, failed_res)
        if fail_ok:
            LOG.warning(err_msg)
            return 1, err_msg
        else:
            raise exceptions.NovaError(err_msg)

    post_hosts_in_aggregate = get_hosts_in_aggregate(aggregate,
                                                     con_ssh=con_ssh)
    if remove:
        failed_hosts = list(set(hosts) & set(post_hosts_in_aggregate))
    else:
        failed_hosts = list(set(hosts) - set(post_hosts_in_aggregate))

    if failed_hosts:
        err_msg = "{} accepted, but some host(s) are not {}ed in aggregate {}: {}".format(
            cmd, msg_str, aggregate, failed_hosts)
        if fail_ok:
            LOG.warning(err_msg)
            return 2, err_msg
        else:
            raise exceptions.NovaError(err_msg)

    succ_msg = "Hosts successfully {}ed in aggregate {}: {}".format(
        msg_str.lower(), aggregate, hosts)
    LOG.info(succ_msg)
    return 0, succ_msg
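A hedged usage sketch of the helper above, adding two hypothetical hosts to the cgcsauto aggregate mentioned in its docstring:

# Host names are illustrative only.
code, msg = __remove_or_add_hosts_in_aggregate('cgcsauto',
                                               hosts=['compute-0', 'compute-1'],
                                               remove=False,
                                               fail_ok=True)
assert code in (-1, 0), msg   # -1 means the hosts were already in the aggregate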
Example #12
def test_snat_computes_lock_reboot(snat_setups):
    """
    Test VM external access after a compute host reboot while all remaining computes are locked

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Steps:
        - Ping VM {} from NatBox
        - Lock all nova hosts except the vm host
        - Ping external from vm
        - Reboot VM host
        - Wait for vm host to complete reboot
        - Verify vm is recovered after host reboot complete and can still ping outside

    Test Teardown:
        - Unlock all hosts
        - Delete the created vm     (module)
        - Disable SNAT on router    (module)

    """
    hypervisors = host_helper.get_hypervisors(state='up')
    if len(hypervisors) > 3:
        skip("More than 3 hypervisors on system. Skip to reduce run time.")

    vm_ = snat_setups[0]
    LOG.tc_step("Ping VM {} from NatBox".format(vm_))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)

    vm_host = vm_helper.get_vm_host(vm_)
    LOG.info("VM host is {}".format(vm_host))
    assert vm_host in hypervisors, "vm host is not in nova hypervisor-list"

    hosts_should_lock = set(hypervisors) - {vm_host}
    hosts_already_locked = set(system_helper.get_hosts(administrative='locked'))
    hosts_to_lock = list(hosts_should_lock - hosts_already_locked)
    LOG.tc_step("Lock all compute hosts {} except vm host {}".format(hosts_to_lock, vm_host))
    for host_ in hosts_to_lock:
        HostsToRecover.add(host_, scope='function')
        host_helper.lock_host(host_, swact=True)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_, timeout=120)
    LOG.tc_step("Ping external from vm {}".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Evacuate vm and expect VM to stay on same host")
    code, output = vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_, fail_ok=True)
    assert code > 0, "Actual: {}".format(output)

    LOG.tc_step("Verify vm is recovered and can still ping outside")
    host_helper.wait_for_hosts_ready(hosts=vm_host)
    vm_helper.wait_for_vm_status(vm_id=vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_, use_fip=True, timeout=60)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
def test_vswitch_line_rate_1core(ovs_dpdk_1_core, create_instances, create_network_performance,
                                 no_simplex, no_duplex):

    LOG.tc_step("Add icmp and tcp rules")
    project_id = keystone_helper.get_projects(name='admin')[0]
    security_group = network_helper.get_security_groups(project=project_id)[0]
    network_helper.add_icmp_and_tcp_rules(security_group=security_group)

    LOG.tc_step("Get original vswitch_type and assigned_function properties")
    host_list = host_helper.get_hypervisors()

    LOG.tc_step("Sync instance with compute containing ns for ssh")
    host, ns = get_host_and_ns(netid=create_network_performance[0], host_list=host_list)
    assert ns is not None, "namespace not found on host list {}".format(host_list)
    if host_list[1] != host:
        vm_helper.live_migrate_vm(vm_id=create_instances["vm_id_1"], destination_host=host)
        vm_helper.live_migrate_vm(vm_id=create_instances["vm_id_2"], destination_host=host)

    LOG.tc_step("Connect to compute node containing images")
    with host_helper.ssh_to_host(host) as node_ssh:
        LOG.tc_step("Create huge file on {}".format(create_instances["vm_id_1"]))
        ssh_cmd = ('ip netns exec {}'
                   ' ssh-keygen -R "{}"'
                   ''.format(ns, create_instances["vm_ip_1"]))
        node_ssh.send_sudo(cmd=ssh_cmd)
        node_ssh.expect()
        ssh_cmd = ('ip netns exec {} '
                    'ssh -o StrictHostKeyChecking=no '
                    '{}@{} "dd if=/dev/zero of=/tmp/test_file count={} bs=1G"'
                    ''.format(ns,
                              IMAGE_USER,
                              create_instances["vm_ip_1"],
                              FILE_SIZE))
        node_ssh.send_sudo(cmd=ssh_cmd)
        node_ssh.expect(['password:', 'Password:'], timeout=10, searchwindowsize=100)
        node_ssh.send(cmd=IMAGE_PASS)
        index = node_ssh.expect(['{}\+0 records out'.format(FILE_SIZE)], timeout=180)
        assert index == 0, "File was not created successfully"

        LOG.tc_step("Copy created file from {} to {}".format(create_instances["vm_id_1"],
                                                             create_instances["vm_id_2"]))

        res = list()

        for i in range(2):
            LOG.tc_step("Start of iter {}".format(i))
            ssh_cmd = ('ip netns exec {}'
                    ' ssh-keygen -R "{}"'
                    ''.format(ns, create_instances["vm_ip_1"]))
            node_ssh.send_sudo(cmd=ssh_cmd)
            node_ssh.expect()
            ssh_cmd = ('ip netns exec {} '
                        'ssh -o StrictHostKeyChecking=no '
                        '{}@{} "ls -lrtsh /tmp/test_file;'
                        ' echo start=$(date +%s%N);'
                        ' time scp -vvv /tmp/test_file {}@{};'
                        ' echo end=$(date +%s%N)"'
                        ''.format(ns,
                                IMAGE_USER,
                                create_instances["vm_ip_1"],
                                IMAGE_USER,
                                create_instances["vm_ip_2"]))
            node_ssh.send_sudo(cmd=ssh_cmd)
            node_ssh.expect(['password:', 'Password:'], timeout=10, searchwindowsize=100)
            node_ssh.send(cmd=IMAGE_PASS)
            index = node_ssh.expect(timeout=120)
            assert index == 0, "File tranfered successfully"
            real_time = None
            for line in node_ssh.cmd_output.split("\n"):
                if "real" in line:
                    real_time = int(line.split()[1][:1]) * 60 + float(line.split()[2][:-1])
            LOG.info("real time = {}".format(real_time))
            rate = FILE_SIZE * 1000 / real_time
            res.append(rate)

    final_res = sum(res) / len(res)
    LOG.info("Avg time is : {} MB/s".format(round(final_res, 3)))
Example #14
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host, one with the dedicated cpu property, and one with the shared cpu property.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.
        - If using 'volume' for boot_source, create volume from tis image.
        - If using 'image' for boot_source, use tis image.
        - Determine the amount of free vcpu(s) on the compute before testing.

    Test Steps:
        - Boot the first vm with CPU_POLICY: dedicated.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Determine the amount of free vcpu(s) on the compute.
        - Boot the second vm with CPU_POLICY: shared.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Delete vms
        - Determine the amount of free vcpu(s) on the compute after testing.
        - Compare free vcpu(s) on the compute before and after testing, ensuring they are the same.

    Test Teardown
        - Delete created volumes and flavors
    """
    LOG.tc_step("Getting host list")
    target_hosts = host_helper.get_hypervisors(state='up')
    target_host = target_hosts[0]
    storage_backing = host_helper.get_host_instance_backing(host=target_host)
    if 'image' in storage_backing:
        storage_backing = 'local_image'
    elif 'remote' in storage_backing:
        storage_backing = 'remote'

    image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)
    pre_test_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    collection = ['dedicated', 'shared']
    vm_ids = []
    for x in collection:
        if x == 'dedicated':
            vcpus = vcpus_dedicated
        else:
            vcpus = vcpus_shared
        LOG.tc_step("Create {} flavor with {} vcpus".format(x, vcpus))
        flavor_id = nova_helper.create_flavor(name=x, vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_id)

        if pol_source == 'flavor':
            LOG.tc_step("Set CPU_POLICY for {} flavor".format(x))
            specs = {FlavorSpec.CPU_POLICY: x}
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            LOG.tc_step("Create image with CPU_POLICY: {}".format(x))
            image_meta = {ImageMetadata.CPU_POLICY: x}
            image_id = glance_helper.create_image(name='cpu_pol_{}'.format(x), cleanup='function', **image_meta)[1]

        if boot_source == 'volume':
            LOG.tc_step("Create volume from image")
            source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(x), source_id=image_id)[1]
            ResourceCleanup.add('volume', source_id)
        else:
            source_id = image_id

        pre_boot_cpus = host_helper.get_vcpus_for_computes(field='used_now')
        LOG.tc_step("Booting cpu_pol_{}".format(x))
        vm_id = vm_helper.boot_vm(name='cpu_pol_{}'.format(x), flavor=flavor_id, source=boot_source,
                                  source_id=source_id, avail_zone='nova', vm_host=target_host, cleanup='function')[1]

        vm_ids.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, cpu_pol=x, vm_host=target_host,
                                          prev_total_cpus=pre_boot_cpus[target_host])

    LOG.tc_step("Deleting both dedicated and shared vms")
    vm_helper.delete_vms(vms=vm_ids)

    post_delete_cpus = host_helper.get_vcpus_for_computes(field='used_now')
    assert post_delete_cpus == pre_test_cpus, "vcpu count after test does not equal vcpu count before test"
Example #15
def test_modify_mtu_data_interface(mtu_range, revert_data_mtu):
    """
    23) Change the MTU value of the data interface using CLI
    Verify that the MTU of data interfaces on all compute nodes can be modified via CLI.
    The min MTU for a data interface can be 1500, 9000 or 9216, in which case the MTU is unchangeable; this still needs to be confirmed.
    Args:
        mtu_range (str): A string that contain the mtu want to be tested
        revert_data_mtu: A fixture to restore changed mtus if any to their original values

    Setup:
        - Nothing

    Test Steps:
        - lock standby controller
        - modify the imtu value of the compute node
        - unlock the controller
        - check the compute node has the expected MTU

    Teardown:
        - Revert data mtu

    """

    hypervisors = host_helper.get_hypervisors(state='up')
    if len(hypervisors) < 2:
        skip("Less than two hypervisors available.")

    if system_helper.is_aio_duplex():
        standby = system_helper.get_standby_controller_name()
        if not standby:
            skip("Standby controller unavailable on CPE system. Unable to lock host")
        hypervisors = [standby]
    else:
        if len(hypervisors) > 2:
            hypervisors = random.sample(hypervisors, 2)

    LOG.tc_step("Delete vms to reduce lock time")
    vm_helper.delete_vms()

    mtu = __get_mtu_to_mod(providernet_name='-data', mtu_range=mtu_range)

    LOG.tc_step("Modify data MTU to {} for hosts: {}".format(mtu, hypervisors))

    net_type = 'data'

    active_controller = system_helper.get_active_controller_name()
    hosts = hypervisors[:]
    if active_controller in hosts:
        hosts.remove(active_controller)
        hosts.append(active_controller)

    for host in hosts:
        interfaces = get_ifs_to_mod(host, net_type, mtu)
        revert_ifs = list(interfaces)
        if not revert_ifs:
            LOG.info('Skip host:{} because there is no interface to set MTU'.format(host))
            continue

        host_helper.lock_host(host, swact=True)

        revert_ifs.reverse()
        changed_ifs = []
        for interface in revert_ifs:
            LOG.tc_step('Checking the max MTU for the IF:{} on host:{}'.format(interface, host))
            max_mtu, cur_mtu, nic_name = get_max_allowed_mtus(host=host, network_type=net_type, if_name=interface)

            LOG.info('Checking the max MTU for the IF:{}, max MTU: {}, host:{}'.format(
                interface, max_mtu or 'NOT SET', host))

            expecting_pass = not max_mtu or mtu <= max_mtu
            if not expecting_pass:
                LOG.warn('Expecting to fail in changing MTU: changing to:{}, max-mtu:{}'.format(mtu, max_mtu))

            pre_mtu = int(host_helper.get_host_interface_values(host, interface, 'imtu')[0])

            LOG.tc_step('Modify MTU of IF:{} on host:{} to:{}, expecting: {}'.format(
                interface, host, mtu, 'PASS' if expecting_pass else 'FAIL'))

            code, res = host_helper.modify_mtu_on_interface(host, interface, mtu_val=mtu, network_type=net_type,
                                                            lock_unlock=False, fail_ok=True)
            msg_result = "PASS" if expecting_pass else "FAIL"
            msg = "Failed to modify data MTU, expecting to {}, \nnew MTU:{}, max MTU:{}, old MTU:{}, " \
                  "Return code:{}; Details: {}".format(msg_result, pre_mtu, max_mtu, pre_mtu, code, res)

            if 0 == code:
                if mtu != cur_mtu:
                    changed_ifs.append(interface)
                    HOSTS_IF_MODIFY_ARGS.append((host, pre_mtu, mtu, max_mtu, interface, net_type))
                assert expecting_pass, msg
            else:
                assert not expecting_pass, msg

            LOG.info('OK, modification of MTU of data interface {} as expected: {}'.format(msg_result, msg_result))

        host_helper.unlock_host(host)
        for interface in revert_ifs:
            if interface in changed_ifs:
                actual_mtu = int(host_helper.get_host_interface_values(host,
                                                                       interface=interface, fields=['imtu'])[0])
                assert actual_mtu == mtu, \
                    'Actual MTU after modification did not match expected, expected:{}, actual:{}'.format(
                        mtu, actual_mtu)
        changed_ifs[:] = []

    if not HOSTS_IF_MODIFY_ARGS:
        skip('No data interface changed!')
        return

    HOSTS_IF_MODIFY_ARGS.reverse()
def test_ping_between_vms_using_hostnames(func_recover):
    """
    This test includes a positive test and a negative test.

    Positive Test:
    Verify that VMs can interact using hostnames after internal dns is setup.

    Negative Test:
    Verify VMS can no longer interact with each other using hostnames after
    disabling internal dns.

    Args:
        - Nothing

    Setup:
        - Nothing

    Test Steps:
        - Delete existing VMs and volumes
        - Provision internal dns name resolution
        - Query DNS entries for subnet and store
        - If DNS entries are not present, set them to a default value
        - Delete dns servers for desired subnet
        - Launch two VMs in the same network
        - Log into the guests and ping the other VM
        - Restore DNS entries for subnet
        - Delete VMs and volumes created during test
        - Disable internal dns name resolution
        - Launch two new VMs in the same network
        - Log into the guest and ping the other VM (should fail)
        - Delete VMS and volumes created during test

    Returns:
        - Nothing

    Teardown:
        - Check the DNS Server entries
        - If not set, restore to original values

    """

    mgmt_net_id = func_recover
    subnet_list = network_helper.get_subnets(network=mgmt_net_id)

    LOG.tc_step("Store existing DNS entries so they can be restored later")
    dns_servers = network_helper.get_subnet_values(
        subnet_list[0], fields='dns_nameservers')[0].split(', ')
    if not dns_servers:
        LOG.tc_step("No DNS servers found. Setting DNS servers to defaults")
        dns_servers = DEFAULT_DNS_SERVERS
        set_dns_servers(subnet_list, dns_servers)

    global UNRESTORED_DNS_SERVERS
    UNRESTORED_DNS_SERVERS = dns_servers
    global HOSTS_AFFECTED
    hosts = host_helper.get_hypervisors()
    HOSTS_AFFECTED = hosts

    LOG.tc_step("Enabling internal dns resolution")
    provision_internal_dns(hosts=hosts)
    HOSTS_AFFECTED = []

    LOG.tc_step("Modify DNS entries for each subnet in the network")
    subnet_list = network_helper.get_subnets(network=mgmt_net_id)
    set_dns_servers(subnet_list)

    LOG.tc_step("Launch two VMs using the same network")
    nics = [{"net-id": mgmt_net_id}]
    vm1_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm2_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm1_name = vm_helper.get_vm_name_from_id(vm1_id)
    vm2_name = vm_helper.get_vm_name_from_id(vm2_id)

    LOG.tc_step("Log into each VM and ping the other VM using the hostname")
    cmd = "ping -c 3 {}".format(vm2_name)
    with vm_helper.ssh_to_vm_from_natbox(vm1_id) as vm_ssh:
        vm_ssh.exec_cmd(cmd, fail_ok=False)
    cmd = "ping -c 3 {}".format(vm1_name)
    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm_ssh:
        vm_ssh.exec_cmd(cmd, fail_ok=False)

    LOG.tc_step("Restore DNS entries for each subnet in the network")
    set_dns_servers(subnet_list, dns_servers)
    UNRESTORED_DNS_SERVERS = []

    LOG.tc_step("Cleanup VMs")
    vm_helper.delete_vms()

    LOG.tc_step("Disabling internal dns resolution")
    HOSTS_AFFECTED = hosts
    deprovision_internal_dns(hosts=hosts)
    HOSTS_AFFECTED = []

    LOG.tc_step("Launch two VMs using the same network")
    vm1_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm2_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm1_name = vm_helper.get_vm_name_from_id(vm1_id)
    vm2_name = vm_helper.get_vm_name_from_id(vm2_id)

    LOG.tc_step("Log into each VM and ping the other VM using the hostname")
    cmd = "ping -c 3 {}".format(vm2_name)
    with vm_helper.ssh_to_vm_from_natbox(vm1_id) as vm_ssh:
        rc, out = vm_ssh.exec_cmd(cmd, fail_ok=True)
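        # iputils ping exits with code 2 on errors such as failed name
        # resolution (expected with internal DNS disabled), vs 1 for no reply.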
        assert rc == 2, out
    cmd = "ping -c 3 {}".format(vm1_name)
    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm_ssh:
        rc, out = vm_ssh.exec_cmd(cmd, fail_ok=True)
        assert rc == 2, out

    LOG.tc_step("Cleanup VMs")
    vm_helper.delete_vms()