Example #1
def _change_mac_address(vm_id, prev_mac_addr, new_mac_addr=None):
    """
    ip link set <dev> up, and dhclient <dev> to bring up the interface of last nic for given VM
    Args:
        vm_id (str):
    """
    if not new_mac_addr:
        new_mac_addr = prev_mac_addr[:-1] + ('2' if prev_mac_addr.endswith('1')
                                             else '1')

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        eth_name = network_helper.get_eth_for_mac(mac_addr=prev_mac_addr,
                                                  ssh_client=vm_ssh)
        vm_ssh.exec_cmd('ip addr')
        vm_ssh.exec_sudo_cmd('ifconfig {} down'.format(eth_name),
                             fail_ok=False)
        vm_ssh.exec_sudo_cmd('ifconfig {} hw ether {}'.format(
            eth_name, new_mac_addr),
                             fail_ok=False)
        vm_ssh.exec_sudo_cmd('ifconfig {} up'.format(eth_name), fail_ok=False)
        vm_ssh.exec_cmd(
            'ip addr | grep --color=never -B 1 -A 1 {}'.format(new_mac_addr),
            fail_ok=False)

    return new_mac_addr
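
The default rewrite above simply toggles the last MAC digit between '1' and '2'. A standalone illustration of that rule (pure Python, independent of the test helpers; the MAC values are made up):

def _toggle_last_mac_digit(mac):
    # mirrors the default used by _change_mac_address when new_mac_addr is None
    return mac[:-1] + ('2' if mac.endswith('1') else '1')

assert _toggle_last_mac_digit('fa:16:3e:aa:bb:c1') == 'fa:16:3e:aa:bb:c2'
assert _toggle_last_mac_digit('fa:16:3e:aa:bb:c2') == 'fa:16:3e:aa:bb:c1'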
Example #2
def test_boot_windows_guest():
    """
    Boot a windows guest to assist for manual testing on windows guest
    """
    # Change the following parameters to change the vm type.
    guest = 'win_2012'  # such as tis-centos-guest
    storage = 'local_image'  # local_lvm, local_image, or remote
    boot_source = 'image'  # volume or image

    LOG.tc_step("Get/Create {} glance image".format(guest))
    glance_helper.get_guest_image(guest_os=guest)

    LOG.tc_step("Create flavor with {} storage backing".format(storage))
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4,
                                       ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    LOG.tc_step("Boot {} vm".format(guest))
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id,
                              guest_os=guest,
                              source=boot_source)[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        output = vm_ssh.exec_cmd('pwd', fail_ok=False)[1]
        LOG.info(output)

    LOG.info(
        "{} is successfully booted from {} with {} storage backing".format(
            guest, boot_source, storage))
Example #3
def test_kpi_cyclictest_vm(collect_kpi, prepare_test_session,
                           get_rt_guest_image, get_hypervisor,
                           add_admin_role_func):
    if not collect_kpi:
        skip("KPI only test.  Skip due to kpi collection is not enabled")

    hypervisor = get_hypervisor
    testable_hypervisors[hypervisor]['for_vm_test'] = True
    LOG.info('Hypervisor chosen to host rt vm: {}'.format(hypervisor))

    vm_id, vcpu_count, non_rt_core = create_rt_vm(hypervisor)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    cyclictest_dir = '/root/cyclictest/'
    program = os.path.join(os.path.normpath(cyclictest_dir),
                           os.path.basename(CYCLICTEST_EXE))
    program_active_con = os.path.join(os.path.normpath(CYCLICTEST_DIR),
                                      os.path.basename(CYCLICTEST_EXE))

    cpu_info = {
        'vm_cores': [id_ for id_ in range(vcpu_count) if id_ != non_rt_core]
    }

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        prep_test_on_host(vm_ssh,
                          vm_id,
                          program_active_con,
                          ControllerClient.get_active_controller().host,
                          cyclictest_dir=cyclictest_dir)
        run_log, hist_file = run_cyclictest(vm_ssh,
                                            program,
                                            vm_id,
                                            cyclictest_dir=cyclictest_dir,
                                            cpu_info=cpu_info)

        LOG.info("Process and upload test results")
        local_run_log, local_hist_file = fetch_results_from_target(
            target_ssh=vm_ssh,
            target_host=vm_id,
            run_log=run_log,
            hist_file=hist_file,
            is_guest=True)

    testable_hypervisors[hypervisor]['for_vm_test'] = False

    avg_val, six_nines_val = calculate_results(run_log=local_run_log,
                                               hist_file=local_hist_file,
                                               cores_to_ignore=None,
                                               num_cores=(vcpu_count - 1))

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_VM_AVG,
                              kpi_val=avg_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_VM_6_NINES,
                              kpi_val=six_nines_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
Example #4
def _check_log_messages(vm_id, hard):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # grep exits 0 if a power button log line exists, 1 otherwise
        code, output = vm_ssh.exec_sudo_cmd(
            'grep "logger: POWER BUTTON" /var/log/messages')
        LOG.info("Output: {}".format(output))
        LOG.info("Result code: {}".format(code))
        assert hard == code, "Hard reboot/stop should leave no power button " \
                             "log entry; a soft action should log one"
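
The `hard == code` assertion leans on two facts: grep exits 0 on a match and 1 on no match, and Python booleans compare equal to ints. A quick standalone check of the comparison:

# hard=True expects no power-button log line (grep exit code 1);
# hard=False (soft action) expects the line to be logged (grep exit code 0).
assert True == 1 and False == 0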
Example #5
def _modify_guest_acpi_file(vm_id):
    power_file = '/etc/acpi/actions/power.sh'
    text = '"POWER BUTTON WAS PRESSED: $1"'
    LOG.tc_step("Modify {} file to add line {}".format(power_file, text))
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd("sed -e '3i /usr/bin/logger {}' -i {}".format(
            text, power_file))
        vm_ssh.exec_sudo_cmd("head -n 5 {}".format(power_file))
Example #6
def _ping_server(vm_id, ip_addr, fail_ok):
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh:
        packet_loss_rate = network_helper.ping_server(ip_addr,
                                                      ssh_client=vm_ssh,
                                                      fail_ok=fail_ok,
                                                      retry=10)[0]

    return packet_loss_rate
Example #7
def _install_sw_packages_in_vm(vm_id):
    """
    install nc inside guest
    Args:
        vm_id (str):
    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd('yum install nc -y', searchwindowsize=100)
Example #8
def _ssh_to_sfc_vm_and_wait_for_packets(start_event,
                                        end_event,
                                        received_event,
                                        vm_id,
                                        protocol,
                                        nsh_aware,
                                        symmetric,
                                        timeout=300):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as root_ssh:
        LOG.info(
            "Verify the tool receives {} packets from VM1 to VM2 and that "
            "the packets can be seen arriving".format(protocol))
        cmd = 'ifconfig'
        root_ssh.send(cmd)
        nsh_type = 'eth_nsh' if nsh_aware else 'eth'
        LOG.info("nsh_aware: {}, nsh_type: {}".format(nsh_aware, nsh_type))
        cmd = 'python ./vxlan_tool.py -i eth1 -o eth2 -d forward -t {}'.format(
            nsh_type)
        root_ssh.send(cmd)
        start_event.set()

        packet_num = 8 if symmetric else 4
        blob_list = 'Forwarding packet'
        if nsh_aware:
            if protocol != 'icmp':
                blob_list = 'Packet #{}'.format(packet_num)
            else:
                blob_list = 'Packet #'

        def _check_receive_event():
            # set receive event if msg received
            index = root_ssh.expect(blob_list=blob_list,
                                    timeout=10,
                                    fail_ok=True)
            if index == 0:
                LOG.info("Received packet in SFC VM: {}".format(vm_id))
                received_event.set()

        end_time = time.time() + timeout
        while time.time() < end_time:
            # Exit the vm ssh, end thread
            if end_event.is_set():
                if not received_event.is_set():
                    _check_receive_event()

                root_ssh.send_control()
                root_ssh.expect(timeout=10, fail_ok=True)
                return

            _check_receive_event()
            time.sleep(5)
Example #9
def test_migration_auto_converge(no_simplex):
    """
    Auto converge a VM with stress-ng running

    Test Steps:
        - Create flavor
        - Create a heat stack (launch a vm with stress-ng)
        - Perform live-migration and verify connectivity

    Test Teardown:
        - Delete stacks, vms, flavors created

    """

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=3)[1]
    ResourceCleanup.add('flavor', flavor_id)

    # add live migration timeout
    extra_specs = {FlavorSpec.LIVE_MIG_TIME_OUT: 300}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Get the heat file name to use")
    heat_template = _get_stress_ng_heat()

    stack_name = vm_name = 'stress_ng'
    LOG.tc_step("Creating heat stack")
    code, msg = heat_helper.create_stack(stack_name=stack_name,
                                         template=heat_template,
                                         parameters={
                                             'flavor': flavor_id,
                                             'name': vm_name
                                         },
                                         cleanup='function')
    assert code == 0, "Failed to create heat stack"

    LOG.info("Verifying server creation via heat")
    vm_id = vm_helper.get_vm_id_from_name(vm_name='stress_ng', strict=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        LOG.tc_step("Check for Stress-ng inside vm")
        assert 0 == wait_for_stress_ng(vm_ssh), "Stress-ng is not running"

    for vm_actions in [['live_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #10
def _send_srv_grp_msg(vm_id, msg, timeout, sent_event, rcv_event):
    with vm_helper.ssh_to_vm_from_natbox(vm_id, close_ssh=False) as sender_ssh:
        sender_ssh.send("server_group_app '{}'".format(msg))
        sender_ssh.expect('\r\n\r\n')
        if sent_event:
            sent_event.set()

        if not isinstance(rcv_event, list):
            rcv_event = [rcv_event]

        for event in rcv_event:
            event.wait_for_event(timeout=timeout)
Example #11
def _ping_from_source_to_dest_vm(vm_id, end_event, dest_vm_internal_net_ip):
    """
    ping -c 4 <dest vm internal ip>
    Args:
        vm_id (str):
    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        cmd = 'ping -c 4 {}'.format(dest_vm_internal_net_ip)
        vm_ssh.send(cmd)
        time.sleep(1)
        vm_ssh.send_control()
        vm_ssh.expect(timeout=10, fail_ok=True)
        end_event.set()
Example #12
def check_scp_to_vm(vm_id, vm_user, vm_password, vm_ip, vm_ext_port,
                    expect_filename):

    with vm_helper.ssh_to_vm_from_natbox(vm_id,
                                         vm_image_name='ubuntu_14',
                                         username=vm_user,
                                         password=vm_password,
                                         vm_ip=vm_ip,
                                         vm_ext_port=vm_ext_port) as vm_ssh:
        cmd = "test -f {}".format(expect_filename)
        rc, output = vm_ssh.exec_cmd(cmd)

    return rc, output
Example #13
def _check_secure_boot_on_vm(vm_id):
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id,
                                         username='******',
                                         password='******',
                                         retry_timeout=360,
                                         timeout=60) as vm_ssh:
        code, output = vm_ssh.exec_cmd('mokutil --sb-state', fail_ok=False)
        assert "SecureBoot enabled" in output, "Vm did not boot in secure mode: {}".format(
            output)
        # vm_ssh.exec_cmd('cat /var/log/cloud-init.log', expect_timeout=300)
        # vm_ssh.exec_cmd('cat /var/log/cloud-init-output.log')
        vm_ssh.exec_cmd('sync')
Example #14
def check_qat_service(vm_id, qat_devs, run_cpa=True, timeout=600):
    """
    Check qat device and service on given vm
    Args:
        vm_id (str):
        qat_devs (dict): {<qat-dev1-name>: <number1>, <qat-dev2-name>: <number2>}
            e.g., {'Intel Corporation DH895XCC Series QAT Virtual Function [8086:0443]' : 32}
        run_cpa (bool): whether to run cpa_sample_code in the guest; this can
            take a long time when there are many qat-vfs
        timeout (int): timeout value to wait for cpa_sample_code to finish

    Returns:

    """
    if qat_devs:
        LOG.tc_step("Check qat-vfs on vm {}".format(vm_id))
    else:
        LOG.tc_step("Check no qat device exist on vm {}".format(vm_id))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh:
        code, output = vm_ssh.exec_sudo_cmd(
            'lspci -nn | grep --color=never QAT', fail_ok=True)
        if not qat_devs:
            assert 1 == code
            return

        assert 0 == code, "Expected QAT devices on vm {} but none found".format(vm_id)
        for dev, expt_count in qat_devs.items():
            actual_count = sum(
                1 for line in output.splitlines() if dev in line)
            assert expt_count == actual_count, \
                "qat device count for {} is {} while expecting {}".format(
                    dev, actual_count, expt_count)

        check_status_cmd = "systemctl status qat_service | grep '' --color=never"
        status = vm_ssh.exec_sudo_cmd(check_status_cmd)[1]
        active_str = 'Active: active'
        if active_str not in status:
            LOG.info("Start qat service")
            vm_ssh.exec_sudo_cmd('systemctl start qat_service', fail_ok=False)
            status = vm_ssh.exec_sudo_cmd(check_status_cmd, fail_ok=False)[1]
            assert active_str in status, "qat_service is not in active state"

        if run_cpa:
            LOG.info("Run cpa_sample_code on quickAssist hardware")
            output = vm_ssh.exec_sudo_cmd('cpa_sample_code signOfLife=1',
                                          fail_ok=False,
                                          expect_timeout=timeout)[1]
            assert 'error' not in output.lower(), "cpa_sample_code test failed"
            LOG.info("cpa_sample_code test completed successfully")
Example #15
def _access_metadata_server_from_vm(vm_id):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd('ip route')
        command = 'wget http://{}/openstack/latest/meta_data.json'.format(
            METADATA_SERVER)
        vm_ssh.exec_cmd(command, fail_ok=False)
        metadata = vm_ssh.exec_cmd('more meta_data.json', fail_ok=False)[1]

    LOG.tc_step("Ensure vm uuid from metadata server is the same as nova show")
    metadata = metadata.replace('\n', '')
    LOG.info(metadata)
    # json.loads (stdlib json module) is safer than eval for parsing JSON
    metadata_uuid = json.loads(metadata)['uuid']

    assert vm_id == metadata_uuid, "VM UUID retrieved from metadata server " \
                                   "is not the same as nova show"
Example #16
def is_vm_filesystem_rw(vm_id, rootfs='vda', vm_image_name=None):
    """

    Args:
        vm_id:
        rootfs (str|list):
        vm_image_name (None|str):

    Returns:

    """
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=240)

    if vm_image_name is None:
        vm_image_name = GuestImages.DEFAULT['guest']

    router_host = dhcp_host = None
    try:
        LOG.info(
            "---------Collecting router and dhcp agent host info-----------")
        router_host = network_helper.get_router_host()
        mgmt_net = network_helper.get_mgmt_net_id()
        dhcp_host = network_helper.get_network_agents(field='Host',
                                                      network=mgmt_net)

        if isinstance(rootfs, str):
            rootfs = [rootfs]
        for fs in rootfs:
            vm_helper.mount_attached_volume(vm_id=vm_id, rootfs=fs)
        with vm_helper.ssh_to_vm_from_natbox(vm_id,
                                             vm_image_name=vm_image_name,
                                             retry_timeout=300) as vm_ssh:
            for fs in rootfs:
                cmd = "mount | grep {} | grep rw | wc -l".format(fs)
                cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1]
                if cmd_output != '1':
                    LOG.info("Filesystem /dev/{} is not rw for VM: {}".format(
                        fs, vm_id))
                    return False
            return True
    except exceptions.SSHRetryTimeout:
        LOG.error("Failed to ssh, collecting vm console log.")
        vm_helper.get_console_logs(vm_ids=vm_id)
        LOG.info("Router host: {}. dhcp agent host: {}".format(
            router_host, dhcp_host))
        raise
Example #17
def _send_hello_message_from_vm_using_tcp_client(vm_id, end_event,
                                                 dest_vm_internal_net_ip):
    """

    nc <internal ip of dest vm> <port>
    nc -lp 20010
    Args:
        vm_id (str):

    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        cmd = "./loop_tcp_client.sh '{}'".format(dest_vm_internal_net_ip)
        vm_ssh.send(cmd)
        time.sleep(1)
        vm_ssh.send_control()
        vm_ssh.expect(timeout=10, fail_ok=True)
        end_event.set()
Example #18
def _bring_up_vlan_interface(vm_id, eth_name, vlan_ids):
    """
    ip link set <dev> up, and dhclient <dev> to bring up the interface of last nic for given VM
    Args:
        vm_id (str): VM to configure the vlan interface
        eth_name (str): eth interface name to add the vlan interfaces on
        vlan_ids (list): list of vlan ids to add
    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for vlan in vlan_ids:
            tmp_list = [eth_name, str(vlan)]
            sub_if = '.'.join(tmp_list)
            vm_ssh.exec_sudo_cmd(
                'ip link add link {} name {} type vlan id {}'.format(
                    eth_name, sub_if, vlan))
            vm_ssh.exec_sudo_cmd('dhclient {}'.format(sub_if))

        vm_ssh.exec_sudo_cmd('ip addr')
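
For reference, a standalone run of the naming logic above shows the commands the loop emits (interface name and vlan ids are illustrative):

eth_name, vlan_ids = 'eth1', [100, 200]
for vlan in vlan_ids:
    sub_if = '.'.join([eth_name, str(vlan)])  # eth1.100, eth1.200
    print('ip link add link {} name {} type vlan id {}'.format(eth_name, sub_if, vlan))
    print('dhclient {}'.format(sub_if))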
Example #19
def create_vm_values_for_type(vm_type, flavor=None):
    global g_flavors, g_vms

    flavor = flavor if flavor is not None else g_flavors[vm_type]
    LOG.info('Creating VM for vTPM using flavor: ' + flavor)

    code, vm_id, msg = vm_helper.boot_vm(name='vm-{}'.format(vm_type),
                                         flavor=flavor,
                                         cleanup='function')

    vm_values = {'id': vm_id}
    g_vms[vm_type] = vm_values

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_values['id']) as ssh_to_vm:
        vm_values['values'] = create_values(ssh_to_vm, vm_type)

    return vm_id
Example #20
def _send_hello_message_from_vm(vm_id, greeting, end_event,
                                dest_vm_internal_net_ip, port, protocol):
    """

    nc <internal ip of dest vm> <port>
    nc -lp 20010
    Args:
        vm_id (str):
        greeting (str): hello

    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        udp_param = '-u' if protocol == 'udp' else ''
        cmd = 'nc {} {} {}'.format(udp_param, dest_vm_internal_net_ip, port)
        vm_ssh.send(cmd)
        vm_ssh.send(greeting)
        time.sleep(1)
        vm_ssh.send_control()
        vm_ssh.expect(timeout=10, fail_ok=True)
        end_event.set()
Example #21
def check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.PING_VM):
    """
    Args:
        vm_id:
        ping_timeout

    Returns:

    """
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=ping_timeout)
    dev = '/dev/hd'
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # Run mount command to determine where /dev/hdX is mounted:
        cmd = """mount | grep "{}" | awk '{{print  $3}} '""".format(dev)
        mount = vm_ssh.exec_cmd(cmd)[1]
        assert mount, "{} is not mounted".format(dev)

        file_path = '{}/openstack/latest/meta_data.json'.format(mount)
        content = vm_ssh.exec_cmd('python -m json.tool {} | grep '
                                  'foo'.format(file_path), fail_ok=False)[1]
        assert '"foo": "bar"' in content
Example #22
def _bring_up_interface(vm_id):
    """
    Set up the network scripts to auto-assign the interface IPv6 address
    Args:
        vm_id (str): VM to configure the vlan interface

    """
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    cmds_to_exe = [
        "ls /etc/sysconfig/network-scripts/",
        "sed -i -- 's/IPV6INIT=no/IPV6INIT=yes/g' /etc/sysconfig/network-scripts/ifcfg-eth1",
        "sed -i '1 i\DHCPV6C=yes' /etc/sysconfig/network-scripts/ifcfg-eth1",
        "sed -i '1 a NETWORKING_IPV6=yes' /etc/sysconfig/network",
        "systemctl restart network"
    ]
    time.sleep(10)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for cmd in cmds_to_exe:
            vm_ssh.exec_sudo_cmd('{}'.format(cmd))

        vm_ssh.exec_sudo_cmd('ip addr')
    return 0
Example #23
def test_vmx_setting():
    """
    Test that vmx feature can be set in guest VM.

    Test Steps:
       - Create a flavor with extra specs hw:wrs:nested_vmx=True and hw:cpu_model=<a cpu model supported by the host>
       - Instantiate a VM with the flavor and check that vm has correct vcpu model
       - ssh into the VM and execute "grep vmx /proc/cpuinfo" and verify that vmx feature is set
    """

    # Create a flavor with specs: hw:wrs:nested_vmx=True and extra spec hw:cpu_model=<compute host cpu model>

    host_cpu_model = 'Passthrough'
    LOG.tc_step("Create flavor for vcpu model {}".format(host_cpu_model))
    flavor_id = nova_helper.create_flavor(fail_ok=False)[1]
    ResourceCleanup.add('flavor', flavor_id)

    LOG.tc_step(
        "Set extra specs for flavor of vcpu model {}".format(host_cpu_model))
    extra_specs = {
        FlavorSpec.NESTED_VMX: True,
        FlavorSpec.VCPU_MODEL: host_cpu_model
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Create VM for vcpu model {}".format(host_cpu_model))
    code, vm, msg = vm_helper.boot_vm(flavor=flavor_id,
                                      cleanup='function',
                                      fail_ok=False)
    ResourceCleanup.add('vm', vm)
    LOG.tc_step("Check vcpu model is correct")
    host = vm_helper.get_vm_host(vm)
    expt_arch = host_helper.get_host_cpu_model(host)
    check_vm_cpu_model(vm_id=vm, vcpu_model='Passthrough', expt_arch=expt_arch)

    LOG.tc_step("Checking to see if 'vmx' is in /proc/cpuinfo")
    with vm_helper.ssh_to_vm_from_natbox(vm) as vm_ssh:
        vm_ssh.exec_cmd("grep vmx /proc/cpuinfo", fail_ok=False)
Example #24
def _wait_for_srv_grp_msg(vm_id, msg, timeout, res_events, listener_event,
                          sent_event):
    with vm_helper.ssh_to_vm_from_natbox(vm_id, retry_timeout=60) as vm_ssh:
        vm_ssh.send('server_group_app')
        # vm_ssh.expect('\r\n\r\n', timeout=1, searchwindowsize=100)
        listener_event.set()
        sent_event.wait_for_event()
        received_event = Events(
            "Server group message received on VM {}".format(vm_id))
        res_events.append(received_event)
        end_time = time.time() + timeout
        while time.time() < end_time:
            code = vm_ssh.expect('\r\n\r\n', fail_ok=True, timeout=timeout)
            if code < 0:
                assert False, "No more server group notification received. Expected msg not found."

            current_output = vm_ssh.cmd_output
            if re.search(msg, current_output):
                received_event.set()
                vm_ssh.send_control('c')
                vm_ssh.expect(searchwindowsize=100, timeout=5)
                break
        else:
            assert False, "Expected msg did not appear within timeout"
Example #25
def check_vm_files(vm_id,
                   storage_backing,
                   ephemeral,
                   swap,
                   vm_type,
                   file_paths,
                   content,
                   root=None,
                   vm_action=None,
                   prev_host=None,
                   post_host=None,
                   disks=None,
                   post_disks=None,
                   guest_os=None,
                   check_volume_root=False):
    """
    Check the files on vm after specified action. This is to check the disks
    in the basic nova matrix table.
    Args:
        vm_id (str):
        storage_backing (str): local_image, local_lvm, or remote
        root (int): root disk size in flavor. e.g., 2, 5
        ephemeral (int): e.g., 0, 1
        swap (int): e.g., 0, 512
        vm_type (str): image, volume, image_with_vol, vol_with_vol
        file_paths (list): list of file paths to check
        content (str): content of the files (assume all files have the same
        content)
        vm_action (str|None): live_migrate, cold_migrate, resize, evacuate,
            None (expect no data loss)
        prev_host (None|str): vm host prior to vm_action. This is used to
        check if vm host has changed when needed.
        post_host (None|str): vm host after vm_action.
        disks (dict): disks that are returned from
        vm_helper.get_vm_devices_via_virsh()
        post_disks (dict): only used in resize case
        guest_os (str|None): default guest assumed for None. e,g., ubuntu_16
        check_volume_root (bool): whether to check root disk size even if vm
        is booted from image

    Returns:

    """
    final_disks = post_disks if post_disks else disks
    final_paths = list(file_paths)
    if not disks:
        disks = vm_helper.get_vm_devices_via_virsh(vm_id=vm_id)

    eph_disk = disks.get('eph', {})
    if not eph_disk and post_disks:
        eph_disk = post_disks.get('eph', {})
    swap_disk = disks.get('swap', {})
    if not swap_disk and post_disks:
        swap_disk = post_disks.get('swap', {})

    disk_check = 'no_loss'
    if vm_action in [None, 'live_migrate']:
        disk_check = 'no_loss'
    elif vm_type == 'volume':
        # boot-from-vol, non-live migrate actions
        disk_check = 'no_loss'
        if storage_backing == 'local_lvm' and (eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
        elif storage_backing == 'local_image' and vm_action == 'evacuate' and (
                eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
    elif storage_backing == 'local_image':
        # local_image, boot-from-image, non-live migrate actions
        disk_check = 'no_loss'
        if vm_action == 'evacuate':
            disk_check = 'local_loss'
    elif storage_backing == 'local_lvm':
        # local_lvm, boot-from-image, non-live migrate actions
        disk_check = 'local_loss'
        if vm_action == 'resize':
            post_host = post_host if post_host else vm_helper.get_vm_host(
                vm_id)
            if post_host == prev_host:
                disk_check = 'eph_swap_loss'

    LOG.info("disk check type: {}".format(disk_check))
    loss_paths = []
    if disk_check == 'no_loss':
        no_loss_paths = final_paths
    else:
        # If there's any loss, we must not have remote storage. And any
        # ephemeral/swap disks will be local.
        disks_to_check = disks.get('eph', {})
        # skip swap type checking for data loss since it's not a regular
        # filesystem
        # swap_disks = disks.get('swap', {})
        # disks_to_check.update(swap_disks)

        for path_ in final_paths:
            # For tis-centos-guest, ephemeral disk is mounted to /mnt after
            # vm launch.
            if str(path_).rsplit('/', 1)[0] == '/mnt':
                loss_paths.append(path_)
                break

        for disk in disks_to_check:
            for path in final_paths:
                if disk in path:
                    # We mount disk vdb to /mnt/vdb, so this is looking for
                    # vdb in the mount path
                    loss_paths.append(path)
                    break

        if disk_check == 'local_loss':
            # if vm booted from image, then the root disk is also local disk
            root_img = disks.get('root_img', {})
            if root_img:
                LOG.info(
                    "Auto mount vm disks again since root disk was local with "
                    "data loss expected")
                vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=final_disks)
                file_name = final_paths[0].rsplit('/')[-1]
                root_path = '/{}'.format(file_name)
                loss_paths.append(root_path)
                assert root_path in final_paths, \
                    "root_path:{}, file_paths:{}".format(root_path, final_paths)

        no_loss_paths = list(set(final_paths) - set(loss_paths))

    LOG.info("loss_paths: {}, no_loss_paths: {}, total_file_pahts: {}".format(
        loss_paths, no_loss_paths, final_paths))
    res_files = {}
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id,
                                         vm_image_name=guest_os) as vm_ssh:
        vm_ssh.exec_sudo_cmd('cat /etc/fstab')
        vm_ssh.exec_sudo_cmd("mount | grep --color=never '/dev'")

        for file_path in loss_paths:
            vm_ssh.exec_sudo_cmd('touch {}2'.format(file_path), fail_ok=False)
            vm_ssh.exec_sudo_cmd('echo "{}" >> {}2'.format(content, file_path),
                                 fail_ok=False)

        for file_path in no_loss_paths:
            output = vm_ssh.exec_sudo_cmd('cat {}'.format(file_path),
                                          fail_ok=False)[1]
            res = '' if content in output else 'content mismatch'
            res_files[file_path] = res

        for file, error in res_files.items():
            assert not error, "Check {} failed: {}".format(file, error)

        swap_disk = final_disks.get('swap', {})
        if swap_disk:
            disk_name = list(swap_disk.keys())[0]
            partition = '/dev/{}'.format(disk_name)
            if disk_check != 'local_loss' and not disks.get('swap', {}):
                mount_on, fs_type = storage_helper.mount_partition(
                    ssh_client=vm_ssh,
                    disk=disk_name,
                    partition=partition,
                    fs_type='swap')
                storage_helper.auto_mount_fs(ssh_client=vm_ssh,
                                             fs=partition,
                                             mount_on=mount_on,
                                             fs_type=fs_type)

            LOG.info("Check swap disk is on")
            swap_output = vm_ssh.exec_sudo_cmd(
                'cat /proc/swaps | grep --color=never {}'.format(partition))[1]
            assert swap_output, "Expect swapon for {}. Actual output: {}". \
                format(partition, vm_ssh.exec_sudo_cmd('cat /proc/swaps')[1])

            LOG.info("Check swap disk size")
            _check_disk_size(vm_ssh, disk_name=disk_name, expt_size=swap)

        eph_disk = final_disks.get('eph', {})
        if eph_disk:
            LOG.info("Check ephemeral disk size")
            eph_name = list(eph_disk.keys())[0]
            _check_disk_size(vm_ssh, eph_name, expt_size=ephemeral * 1024)

        if root:
            image_root = final_disks.get('root_img', {})
            root_name = ''
            if image_root:
                root_name = list(image_root.keys())[0]
            elif check_volume_root:
                root_name = list(final_disks.get('root_vol').keys())[0]

            if root_name:
                LOG.info("Check root disk size")
                _check_disk_size(vm_ssh,
                                 disk_name=root_name,
                                 expt_size=root * 1024)
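
The disk_check decision above condenses a disk-loss matrix. A standalone summary of the same rules, mirroring the branches in check_vm_files (not adding any new cases):

def expected_disk_check(vm_action, vm_type, storage_backing,
                        has_eph_or_swap, host_changed):
    if vm_action in (None, 'live_migrate'):
        return 'no_loss'
    if vm_type == 'volume':
        if has_eph_or_swap and (
                storage_backing == 'local_lvm' or
                (storage_backing == 'local_image' and vm_action == 'evacuate')):
            return 'eph_swap_loss'
        return 'no_loss'
    if storage_backing == 'local_image':
        return 'local_loss' if vm_action == 'evacuate' else 'no_loss'
    if storage_backing == 'local_lvm':
        if vm_action == 'resize' and not host_changed:
            return 'eph_swap_loss'
        return 'local_loss'
    return 'no_loss'  # remote storage: no loss expected

assert expected_disk_check('evacuate', 'image', 'local_image',
                           False, True) == 'local_loss'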
Example #26
def _check_vm_topology_on_vm(vm_id,
                             vcpus,
                             siblings_total,
                             current_vcpus=None,
                             prev_siblings=None,
                             guest=None,
                             check_sibling=True):
    siblings_total_ = None
    if siblings_total:
        siblings_total_ = copy.deepcopy(siblings_total)
    # Check from vm in /proc/cpuinfo and
    # /sys/devices/.../cpu#/topology/thread_siblings_list
    if not guest:
        guest = ''
    if not current_vcpus:
        current_vcpus = int(vcpus)

    LOG.info(
        '=== Check vm topology from within the vm via: /sys/devices/system/cpu'
    )
    actual_sibs = []
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:

        win_expt_cores_per_sib = win_log_count_per_sibling = None
        if 'win' in guest:
            LOG.info(
                "{}Check windows guest cores via wmic cpu get cmds".format(
                    SEP))
            offline_cores_count = 0
            log_cores_count, win_log_count_per_sibling = \
                get_procs_and_siblings_on_windows(vm_ssh)
            online_cores_count = present_cores_count = log_cores_count
        else:
            LOG.info(
                "{}Check vm present|online|offline cores from inside vm via "
                "/sys/devices/system/cpu/".format(SEP))
            present_cores, online_cores, offline_cores = \
                vm_helper.get_proc_nums_from_vm(vm_ssh)
            present_cores_count = len(present_cores)
            online_cores_count = len(online_cores)
            offline_cores_count = len(offline_cores)

        assert vcpus == present_cores_count, \
            "Number of vcpus: {}, present cores: {}".format(
                vcpus, present_cores_count)
        assert current_vcpus == online_cores_count, \
            "Current vcpus for vm: {}, online cores: {}".format(
                current_vcpus, online_cores_count)

        expt_total_cores = online_cores_count + offline_cores_count
        assert expt_total_cores in [present_cores_count, 512], \
            "Number of present cores: {}. online+offline cores: {}".format(
                present_cores_count, expt_total_cores)

        if check_sibling and siblings_total_ and online_cores_count == \
                present_cores_count:
            # the guard above guarantees siblings_total_ is truthy here
            expt_sibs_list = siblings_total_

            expt_sibs_list = [sorted(expt_sibs_list)]
            if prev_siblings:
                # siblings_total may get modified here
                expt_sibs_list.append(sorted(prev_siblings))

            if 'win' in guest:
                LOG.info("{}Check windows guest siblings via wmic cpu get "
                         "cmds".format(SEP))
                expt_cores_list = []
                for sib_list in expt_sibs_list:
                    win_expt_cores_per_sib = [len(vcpus) for vcpus in sib_list]
                    expt_cores_list.append(win_expt_cores_per_sib)
                assert win_log_count_per_sibling in expt_cores_list, \
                    "Expected log cores count per sibling: {}, actual: {}".\
                    format(win_expt_cores_per_sib, win_log_count_per_sibling)

            else:
                LOG.info("{}Check vm /sys/devices/system/cpu/["
                         "cpu#]/topology/thread_siblings_list".format(SEP))
                for cpu in [
                        'cpu{}'.format(i) for i in range(online_cores_count)
                ]:
                    actual_sibs_for_cpu = \
                        vm_ssh.exec_cmd(
                            'cat /sys/devices/system/cpu/{}/topology/thread_'
                            'siblings_list'.format(cpu), fail_ok=False)[1]

                    sib_for_cpu = common.parse_cpus_list(actual_sibs_for_cpu)
                    if sib_for_cpu not in actual_sibs:
                        actual_sibs.append(sib_for_cpu)

                assert sorted(
                    actual_sibs) in expt_sibs_list, "Expt sib lists: {}, " \
                                                    "actual sib list: {}". \
                    format(expt_sibs_list, sorted(actual_sibs))
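
common.parse_cpus_list expands the sibling lists read above; its implementation is not shown here, but a hypothetical stand-in, assuming the kernel's "0-1,4" range syntax in thread_siblings_list, could look like:

def parse_cpus_list(cpus_str):
    # hypothetical stand-in for common.parse_cpus_list
    cpus = []
    for part in cpus_str.strip().split(','):
        if '-' in part:
            start, end = part.split('-')
            cpus.extend(range(int(start), int(end) + 1))
        else:
            cpus.append(int(part))
    return sorted(cpus)

assert parse_cpus_list('0-1,4') == [0, 1, 4]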
Example #27
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find an internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are placed on separate hosts, achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to an unknown IP and check compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured, configured on more than one "
             "provider net, or not configured on the internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate was not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure VMs are not on the same compute; double-check and migrate if needed:
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert 0, "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
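
The three stats-parsing blocks above share the same shape; a sketch of a helper that could consolidate them (the helper name is illustrative, not part of the framework):

def _sum_vxlan_stats(stats_row):
    # stats rows come back as [label, v1] or [label, v1, v2], per the
    # 2- and 3-element branches above
    if len(stats_row) not in (2, 3):
        raise ValueError("Failed to get stats from compute")
    return sum(int(val) for val in stats_row[1:])

assert _sum_vxlan_stats(['packets-unicast', '3', '4']) == 7
assert _sum_vxlan_stats(['packets-unicast', '5']) == 5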
Example #28
def test_ntfs(stx_openstack_required, host_type="controller"):
    """
    This test will test NTFS mount and NTFS formatted device creation on a TiS
    system.

    Arguments:
    - host_type (string) - host type to be tested, e.g. controller, compute,
      storage

    Returns:
    - Nothing

    Test Steps:
    1.  Check if desired host has USB inserted.  If not, skip
    2.  Wipe USB
    3.  Change label of device
    4.  Create partitions on NTFS device
    5.  Format partitions
    6.  Copy large image to NTFS mount point
    7.  Test mount and big file creation on NTFS mounted device
    """

    # Could pass these in through parametrize instead
    mount_type = "ntfs"
    mount_point = "/media/ntfs/"
    guest_os = 'win_2012'
    boot_source = "image"

    host, usb_device = locate_usb(host_type, min_size=13)
    if not host:
        skip("No USB hardware found on {} host type".format(host_type))

    hosts_with_image_backing = host_helper.get_hosts_in_storage_backing(storage_backing='image')
    if len(hosts_with_image_backing) == 0:
        skip("No hosts with image backing present")

    # if the host with the USB is not the active controller, swact controllers
    con_ssh = ControllerClient.get_active_controller()
    active_controller = system_helper.get_active_controller_name(con_ssh)
    if host != active_controller:
        host_helper.swact_host()

    with host_helper.ssh_to_host(host) as host_ssh:
        wipe_usb(host_ssh, usb_device)
        umount_usb(host_ssh, mount_point=mount_point)
        create_usb_label(host_ssh, usb_device, label="msdos")
        create_usb_partition(host_ssh, usb_device, startpt="0", endpt="2048")
        format_usb(host_ssh, usb_device, partition="1")
        create_usb_partition(host_ssh, usb_device, startpt="2049", endpt="100%")
        format_usb(host_ssh, usb_device, partition="2")
        mount_usb(host_ssh, usb_device, partition="2", mount_type=mount_type, mount_point=mount_point)

    LOG.tc_step("Copy the windows guest image to the mount point")
    src_img = glance_helper.scp_guest_image(img_os=guest_os, dest_dir=mount_point, con_ssh=con_ssh)

    LOG.tc_step("Create flavor for windows guest image")
    flv_id = nova_helper.create_flavor(name=guest_os, vcpus=4, ram=8192, storage_backing="local_image",
                                       guest_os=guest_os)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: "dedicated"})
    ResourceCleanup.add("flavor", flv_id)

    LOG.tc_step("Import image into glance")
    glance_helper.create_image(name=guest_os, source_image_file=src_img, disk_format="qcow2",
                               container_format="bare", con_ssh=con_ssh, cleanup="function")

    LOG.tc_step("Boot VM")
    vm_id = vm_helper.boot_vm(name=guest_os, flavor=flv_id, guest_os=guest_os, source=boot_source, cleanup="function")[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        output = vm_ssh.exec_cmd('pwd', fail_ok=False)[1]
        LOG.info(output)
Example #29
def test_evacuate_pci_vm(vif_model_check):
    """
    Test evacuate vm with multiple ports on same network

    Args:

    Setups:
        - create a flavor with dedicated cpu policy (module)
        - choose one tenant network and one internal network to be used by test (module)
        - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
        - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
        and ping it from NatBox     (class)
        - Ping vm2's own data network ips       (class)
        - Ping vm2 from vm1 to verify management and data networks connection   (class)

    Test Steps:
        - Reboot vm2 host
        - Wait for vm2 to be evacuated to other host
        - Wait for vm2 pingable from NatBox
        - Verify ping from vm1 to vm2 over management and data networks still works

    Teardown:
        - Delete created vms and flavor
    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net = vif_model_check

    LOG.tc_step("Boot a vm with {} vif model on {} net".format(
        vif_model, net_type))
    res, vm_id, err = vm_helper.boot_vm(name=vif_model,
                                        flavor=flavor_id,
                                        cleanup='function',
                                        nics=nics_to_test)
    assert 0 == res, "VM is not booted successfully. Error: {}".format(err)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping vm over mgmt and {} nets from base vm".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    host = vm_helper.get_vm_host(vm_id)

    # Remove the following ssh-to-VM sync step once CGTS-9279 is fixed
    LOG.tc_step("Login in to VM & do sync command")
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd('sync')

    LOG.tc_step("Reboot vm host {}".format(host))
    vm_helper.evacuate_vms(host=host,
                           vms_to_check=vm_id,
                           ping_vms=True,
                           wait_for_host_up=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step(
            "Add vlan to pci-passthrough interface for VM again after evacuation due to interface change."
        )
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id)

    LOG.tc_step(
        "Check vm still pingable over mgmt, and {} nets after evacuation".
        format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    LOG.tc_step(
        "Wait for rebooted host {} to recover and ensure the vm is still "
        "reachable".format(host))
    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])
Example #30
def check_ssh_to_vm_and_wait_for_packets(start_event,
                                         end_event,
                                         received_event,
                                         vm_id,
                                         vm_ip,
                                         vm_ext_port,
                                         expt_output,
                                         protocol='tcp',
                                         timeout=1200):
    """

    Args:
        start_event (Events):
        end_event (Events):
        received_event (Events):
        vm_id:
        vm_ip:
        vm_ext_port:
        expt_output:
        protocol:
        timeout:

    Returns:

    """
    with vm_helper.ssh_to_vm_from_natbox(vm_id,
                                         vm_image_name='ubuntu_14',
                                         vm_ip=vm_ip,
                                         vm_ext_port=vm_ext_port,
                                         username='******',
                                         password='******',
                                         retry=False) as vm_ssh:

        with vm_ssh.login_as_root() as root_ssh:
            LOG.info("Start listening on port 80 on vm {}".format(vm_id))
            cmd = "nc -l{}w 1 80".format('u' if protocol == 'udp' else '')
            root_ssh.send(cmd)
            start_event.set()

            def _check_receive_event():
                # set receive event if msg received
                index = root_ssh.expect(timeout=10, fail_ok=True)
                if index == 0:
                    received_event.set()
                    output = root_ssh.cmd_output
                    assert expt_output in output, \
                        "Output: {} received, but not as expected: {}".format(output, expt_output)
                    LOG.info("Received output: {}".format(output))

            end_time = time.time() + timeout
            while time.time() < end_time:
                # Exit the vm ssh, end thread
                if end_event.is_set():
                    if not received_event.is_set():
                        _check_receive_event()

                    root_ssh.send_control()
                    root_ssh.expect(timeout=10, fail_ok=True)
                    return

                # start_event is unset for a new test step
                if not start_event.is_set():
                    root_ssh.send(cmd)
                    start_event.set()
                    received_event.clear()

                _check_receive_event()
                time.sleep(5)

    assert 0, "end_event is not set within timeout"