Code example #1
    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem), 30, first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")
Code example #2
def clear_interface_linux(vm, login_timeout, timeout):
    """
    Clears user interface of a vm without reboot

    :param vm:            VM where cleaning is required
    :param login_timeout: timeout for logging into the vm
    :param timeout:       timeout for the X/gdm restart wait
    """
    logging.info("restarting X/gdm on: %s", vm.name)
    session = vm.wait_for_login(username="******", password="******",
                                timeout=login_timeout)

    if "release 7" in session.cmd('cat /etc/redhat-release'):
        command = "gdm"
        pgrep_process = "'^gdm$'"
    else:
        command = "Xorg"
        pgrep_process = "Xorg"

    try:
        pid = session.cmd("pgrep %s" % pgrep_process)
        session.cmd("killall %s" % command)
        utils_misc.wait_for(lambda: _is_pid_alive(session, pid), 10,
                            timeout, 0.2)
    except Exception:
        # Best effort; the check below verifies that X/gdm is running again.
        pass

    try:
        session.cmd("ps -C %s" % command)
    except ShellCmdError:
        raise error.TestFail("X/gdm not running")
Code example #3
def run(test, params, env):
    """
    block_stream_installation test:
    1). guest installation
    2). live snapshot during guest installation
    3). block stream afterwards
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    args = (test, params, env)
    bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test, args,
                                      {"sub_type": "unattended_install"})
    bg.start()
    utils_misc.wait_for(bg.is_alive, timeout=10)
    time.sleep(random.uniform(60, 200))
    tag = params["source_image"]
    stream_test = blk_stream.BlockStream(test, params, env, tag)
    stream_test.trash_files.append(stream_test.image_file)
    try:
        stream_test.create_snapshots()
        stream_test.start()
        stream_test.wait_for_finished()
        bg.join()
    finally:
        stream_test.clean()
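
Note: code examples #3 and #19 share this background-installation pattern. A sketch of the module-level imports they assume, judging by the names used (the exact module paths are assumptions):

import random
import time

from virttest import utils_misc, utils_test
from qemu.tests import blk_stream, drive_mirror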
Code example #4
File: set_link.py Project: ldoktor/tp-qemu
    def guest_netwok_connecting_check(guest_ip, link_up, change_queues=False):
        """
        Check whether the guest network is reachable by ping
        """
        if change_queues:
            env["run_change_queues"] = False
            bg_thread = utils_misc.InterruptedThread(
                change_queues_number_repeatly, (guest_ifname,))
            bg_thread.start()

            utils_misc.wait_for(lambda: env["run_change_queues"], 30, 0, 2,
                                "wait queues change start")
        time.sleep(0.5)
        output = utils_test.ping(guest_ip, 10, interface=host_interface,
                                 timeout=20, session=None)[1]
        if not link_up and utils_test.get_loss_ratio(output) < 80:
            err_msg = "Guest network is still connected after the link was set down"
            test.fail(err_msg)
        elif link_up and utils_test.get_loss_ratio(output) > 20:
            err_msg = "Too many packets lost when pinging guest ip after link up"
            test.fail(err_msg)

        if change_queues:
            env["run_change_queues"] = False
            bg_thread.join()
Code example #5
File: iface_network.py Project: lento-sun/tp-libvirt
    def run_ip_test(session, ip_ver):
        """
        Check iptables on host and ipv6 address on guest
        """
        if ip_ver == "ipv6":
            # Clean up iptables rules for guest to get ipv6 address
            session.cmd_status("ip6tables -F")

        # It may take some time to get the ip address
        def get_ip_func():
            return utils_net.get_guest_ip_addr(session, iface_mac,
                                               ip_version=ip_ver)
        utils_misc.wait_for(get_ip_func, 5)
        if not get_ip_func():
            utils_net.restart_guest_network(session, iface_mac,
                                            ip_version=ip_ver)
            utils_misc.wait_for(get_ip_func, 5)
        vm_ip = get_ip_func()
        logging.debug("Guest has ip: %s", vm_ip)
        if not vm_ip:
            test.fail("Can't find ip address on guest")
        ip_gateway = net_ip_address
        if ip_ver == "ipv6":
            ip_gateway = net_ipv6_address
            # Clean up ip6tables on host for ping6 test
            process.system("ip6tables -F")
        if ip_gateway and not routes:
            ping_s, _ = ping(dest=ip_gateway, count=5,
                             timeout=10, session=session)
            if ping_s:
                test.fail("Failed to ping gateway address: %s" % ip_gateway)
Code example #6
File: multi_nics_stress.py Project: ldoktor/tp-qemu
def launch_netperf_client(test, server_ips, netperf_clients, test_option,
                          test_duration, netperf_para_sess,
                          netperf_cmd_prefix):
    """
    Start netperf clients in the guest.
    """
    start_time = time.time()
    stop_time = start_time + test_duration * 1.5
    logging.info("server_ips = %s", server_ips)
    for s_ip in server_ips:
        for n_client in netperf_clients:
            n_client.bg_start(s_ip, test_option,
                              netperf_para_sess, netperf_cmd_prefix)
            if utils_misc.wait_for(n_client.is_netperf_running, 30, 0, 1,
                                   "Wait for netperf test to start"):
                logging.info("Netperf test started successfully.")
            else:
                test.error("Cannot start netperf client.")

    for n_client in netperf_clients:
        if n_client.is_netperf_running():
            left_time = stop_time - time.time()
            utils_misc.wait_for(
                lambda: not n_client.is_netperf_running(),
                left_time, 0, 5,
                "Wait for netperf test to finish in %ss" % left_time)
Code example #7
 def get_dirty(session, frozen=False):
     """
     Get dirty data of guest
     """
     try:
         data_cmd = "cat /proc/meminfo | grep Dirty"
         if not frozen:
             result = utils_misc.wait_for(lambda:
                                          int(session.
                                              cmd_output(data_cmd).
                                              strip().split()[1]) != 0,
                                          60)
             if result:
                 return int(session.cmd_output(data_cmd).strip().
                            split()[1])
             else:
                 return 0
         else:
             result = utils_misc.wait_for(lambda:
                                          int(session.
                                              cmd_output(data_cmd).
                                              strip().split()[1]) == 0,
                                          60)
             if result:
                 return 0
             else:
                 return int(session.cmd_output(data_cmd).strip().
                            split()[1])
     except (IndexError, ValueError), details:
         raise error.TestFail("Get dirty info failed: %s" % details)
Code example #8
File: balloon_check.py Project: Zhengtong/tp-qemu
 def wait_for_balloon_complete(self, timeout):
     """
     Wait until guest memory doesn't change
     """
     logging.info("Wait until guest memory doesn't change")
     is_stable = self._mem_state()
     utils_misc.wait_for(is_stable.next, timeout, step=10.0)
Code example #9
File: set_link.py Project: Xiangmin/tp-qemu
    def guest_netwok_connecting_check(guest_ip, link_up, change_queues=False):
        """
        Check whether the guest network is reachable by ping
        """
        if link_up:
            vm.wait_for_login()
            guest_ip = vm.get_address()
        if change_queues:
            env["run_change_queues"] = False
            bg_thread = utils.InterruptedThread(change_queues_number_repeatly,
                                                (guest_ifname,))
            bg_thread.start()

            utils_misc.wait_for(lambda: env["run_change_queues"], 30, 0, 2,
                                "wait queues change start")

        _, output = utils_test.ping(guest_ip, count=10, timeout=20)
        if not link_up and utils_test.get_loss_ratio(output) != 100:
            err_msg = "Guest network is still connected after the link was set down"
            raise error.TestFail(err_msg)
        elif link_up and utils_test.get_loss_ratio(output) == 100:
            err_msg = "All packets were lost when pinging guest ip after link up"
            raise error.TestFail(err_msg)
        else:
            logging.info("Guest network connectivity is exactly as expected")

        if change_queues:
            env["run_change_queues"] = False
            bg_thread.join()
Code example #10
File: ntttcp.py Project: ayiyaliing/virt-test
 def receiver():
     """ Receive side """
     logging.info("Starting receiver process on %s", receiver_addr)
     if vm_receiver:
         session = vm_receiver.wait_for_login(timeout=login_timeout)
     else:
         username = params.get("username", "")
         password = params.get("password", "")
         prompt = params.get("shell_prompt", "[\#\$]")
         linesep = eval("'%s'" % params.get("shell_linesep", r"\n"))
         client = params.get("shell_client")
         port = int(params.get("shell_port"))
         log_filename = ("session-%s-%s.log" % (receiver_addr,
                         utils_misc.generate_random_string(4)))
         session = remote.remote_login(client, receiver_addr, port,
                                       username, password, prompt,
                                       linesep, log_filename, timeout)
         session.set_status_test_command("echo %errorlevel%")
     install_ntttcp(session)
     ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd")
     global _receiver_ready
     f = open(results_path + ".receiver", 'a')
     for b in buffers:
         utils_misc.wait_for(lambda: not _wait(), timeout)
         _receiver_ready = True
         rbuf = params.get("fixed_rbuf", b)
         cmd = ntttcp_receiver_cmd % (
             session_num, receiver_addr, rbuf, buf_num)
         r = session.cmd_output(cmd, timeout=timeout,
                                print_func=logging.debug)
         f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r))
     f.close()
     session.close()
Code example #11
    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        virsh_cmd = "service libvirt-guests stop"
        check_flags_parallel(virsh_cmd, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "1", flags), flags)
        ret = utils.run("service libvirt-guests status",
                        ignore_status=True)
        logging.info("status output: %s", ret.stdout)
        if not re.findall(r"Suspending %s" % vm_name,
                          ret.stdout, re.M):
            raise error.TestFail("Can't see messages of suspending vm")
        # status command should return 3.
        if ret.exit_status != 3:
            raise error.TestFail("The exit code %s for libvirt-guests"
                                 " status is not correct" % ret)

        # Wait for VM in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        virsh_cmd = "service libvirt-guests start"
        check_flags_parallel(virsh_cmd, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0", flags), flags)
Code example #12
File: __init__.py Project: ngu-niny/avocado-vt
    def check_memory(self, vm=None):
        """
        Check whether guest memory really matches what was assigned to the VM.

        :param vm: VM object, get VM object from env if vm is None.
        """
        error_context.context("Verify memory info", logging.info)
        if not vm:
            vm = self.env.get_vm(self.params["main_vm"])
        vm.verify_alive()
        threshold = float(self.params.get("threshold", 0.10))
        timeout = float(self.params.get("wait_resume_timeout", 60))
        # Notes:
        #    some sub tests pause the VM; wait for the VM to resume
        # before checking memory info in the guest.
        utils_misc.wait_for(lambda: not vm.is_paused(), timeout=timeout)
        utils_misc.verify_host_dmesg()
        self.os_type = self.params.get("os_type")
        guest_mem_size = super(MemoryHotplugTest, self).get_guest_total_mem(vm)
        vm_mem_size = self.get_vm_mem(vm)
        if abs(guest_mem_size - vm_mem_size) > vm_mem_size * threshold:
            msg = ("Assigned '%s MB' memory to '%s'"
                   "but, '%s MB' memory detect by OS" %
                   (vm_mem_size, vm.name, guest_mem_size))
            raise exceptions.TestFail(msg)
Code example #13
File: pci_hotplug.py Project: FT4VT/FT4VM-L1_test
    def add_device(pci_num, queues=1):
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)

        try:
            # Get function for adding device.
            add_function = local_functions["%s_%s" % (cmd_type, pci_type)]
        except Exception:
            raise error.TestError("No function for adding '%s' dev with '%s'" %
                                  (pci_type, cmd_type))
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num, queues)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                o = session.cmd_output(reference_cmd)
                return o != reference

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output(params.get("find_pci_cmd"))
                output = map(string.strip, output.splitlines())
                ref = map(string.strip, reference.splitlines())
                output = [_ for _ in output if _ not in ref]
                output = "\n".join(output)
                if re.search(params.get("match_string"), output, re.I):
                    return True
                return False

            error.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                raise error.TestFail("No new PCI device shown after executing "
                                     "monitor command: 'info pci'")

            secs = int(params.get("wait_secs_for_hook_up"))
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                raise error.TestFail("No new device shown in output of command "
                                     "executed inside the guest: %s" %
                                     reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                raise error.TestFail("PCI %s %s device not found in guest. "
                                     "Command was: %s" %
                                     (pci_model, pci_type,
                                      params.get("find_pci_cmd")))

            # Test the newly added device
            try:
                session.cmd(params.get("pci_test_cmd") % (pci_num + 1))
            except aexpect.ShellError, e:
                raise error.TestFail("Check for %s device failed after PCI "
                                     "hotplug. Output: %r" % (pci_type, e.output))

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise
Code example #14
def do_fstrim(fstrim_type, vm, status_error=False):
    """
    Execute fstrim in different ways, and check its result.
    """
    # ensure that volume capacity won't change before trimming disk.
    if not _sync_finished():
        utils_misc.wait_for(_sync_finished, timeout=300)
    if fstrim_type == "fstrim_cmd":
        session = vm.wait_for_login()
        output = session.cmd_output("fstrim -v /mnt", timeout=240)
        session.close()
        logging.debug(output)
        if re.search("Operation not supported", output):
            if status_error:
                # virtio is not supported in unmap operations
                logging.debug("Expected failure: virtio do not support fstrim")
                return
            else:
                raise error.TestFail("Not supported fstrim on supported "
                                     "envrionment.Bug?")
        try:
            trimmed_bytes = re.search("\d+\sbytes",
                                      output).group(0).split()[0]
            trimmed = int(trimmed_bytes)
            logging.debug("Trimmed size is:%s bytes", trimmed)
        except (AttributeError, IndexError), detail:
            raise error.TestFail("Do fstrim failed:%s" % detail)
        if trimmed == 0:
            raise error.TestFail("Trimmed size is 0.")
Code example #15
    def get_ip_by_mac(mac_addr, try_dhclient=False):
        """
        Get interface IP address by given MAC address. If try_dhclient is
        True, then try to allocate an IP address for the interface.
        """
        session = vm.wait_for_login()

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclient:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    logging.warn("Found interface '%s' with MAC address '%s', "
                                 "but it has no IP address", iface_name,
                                 mac_addr)
        finally:
            session.close()
        return ip_addr
Code example #16
File: driver_in_use.py Project: PyLearner/tp-qemu
    def run_bg_stress_test(bg_stress_test):
        """
        Run background test.

        :param bg_stress_test: Background test.
        :return: return the background case thread if it's successful;
                 else raise error.
        """
        error_context.context("Run test %s background" % bg_stress_test,
                              logging.info)
        stress_thread = None
        wait_time = float(params.get("wait_bg_time", 60))
        target_process = params.get("target_process", "")
        bg_stress_run_flag = params.get("bg_stress_run_flag")
        # Need to set bg_stress_run_flag in some cases to make sure all
        # necessary steps are active
        env[bg_stress_run_flag] = False
        stress_thread = utils.InterruptedThread(
            utils_test.run_virt_sub_test, (test, params, env),
            {"sub_type": bg_stress_test})
        stress_thread.start()
        if not utils_misc.wait_for(lambda: check_bg_running(target_process),
                                   120, 0, 1):
            raise exceptions.TestFail("Backgroud test %s is not "
                                      "alive!" % bg_stress_test)
        if params.get("set_bg_stress_flag", "no") == "yes":
            logging.info("Wait %s test start" % bg_stress_test)
            if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag),
                                       wait_time, 0, 0.5):
                err = "Fail to start %s test" % bg_stress_test
                raise exceptions.TestError(err)
        env["bg_status"] = 1
        return stress_thread
Code example #17
    def check_boot_result(boot_fail_info, device_name):
        """
        Check boot result, and logout from iscsi device if boot from iscsi.
        """

        logging.info("Wait for display and check boot info.")
        infos = boot_fail_info.split(';')
        f = lambda: re.search(infos[0], vm.serial_console.get_output())
        utils_misc.wait_for(f, timeout, 1)

        logging.info("Try to boot from '%s'" % device_name)
        try:
            if dev_name == "hard-drive" or (dev_name == "scsi-hd" and not
                                            params.get("image_name_stg")):
                error.context("Log into the guest to verify it's up",
                              logging.info)
                session = vm.wait_for_login(timeout=timeout)
                session.close()
                vm.destroy()
                return

            output = vm.serial_console.get_output()

            for i in infos:
                if not re.search(i, output):
                    raise error.TestFail("Could not boot from"
                                         " '%s'" % device_name)
        finally:
            cleanup(device_name)
Code example #18
File: iface_network.py Project: Antique/tp-libvirt
    def run_ip_test(session, ip_ver):
        """
        Check iptables on host and ipv6 address on guest
        """
        if ip_ver == "ipv6":
            # Clean up iptables rules for guest to get ipv6 address
            session.cmd_status("ip6tables -F")
        utils_net.restart_guest_network(session, iface_mac,
                                        ip_version=ip_ver)

        # It may take some time to get the ip address
        def get_ip_func():
            return utils_net.get_guest_ip_addr(session, iface_mac,
                                               ip_version=ip_ver)

        utils_misc.wait_for(get_ip_func, 10)
        vm_ip = get_ip_func()
        logging.debug("Guest has ip: %s", vm_ip)
        if not vm_ip:
            raise error.TestFail("Can't find ip address on guest")
        ping_cmd = "ping -c 5"
        ip_gateway = net_ip_address
        if ip_ver == "ipv6":
            ping_cmd = "ping6 -c 5"
            ip_gateway = net_ipv6_address
        if ip_gateway:
            if utils.system("%s %s" % (ping_cmd, ip_gateway),
                            ignore_status=True):
                raise error.TestFail("Failed to ping gateway address: %s"
                                     % ip_gateway)
Code example #19
def run(test, params, env):
    """
    drive_mirror_stress test:
    1). guest installation
    2). start mirror during guest installation
    3). after installation completes, reboot guest and verify it reboots correctly.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    args = (test, params, env)
    bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test, args,
                                      {"sub_type": "unattended_install"})
    bg.start()
    utils_misc.wait_for(bg.is_alive, timeout=10)
    time.sleep(random.uniform(60, 200))
    tag = params["source_image"]
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    mirror_test.trash_files.append(mirror_test.image_file)
    try:
        mirror_test.start()
        mirror_test.wait_for_steady()
        mirror_test.reopen()
        bg.join()
    finally:
        mirror_test.clean()
Code example #20
File: sr_iov_hotplug.py Project: FT4VT/FT4VM-L1_test
    def add_device(pci_num):
        reference_cmd = params["reference_cmd"]
        find_pci_cmd = params["find_pci_cmd"]
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)

        try:
            # Get function for adding device.
            add_function = local_functions["%s_iov" % cmd_type]
        except Exception:
            raise error.TestError(
                "No function for adding sr-iov dev with '%s'" %
                cmd_type)
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                o = session.cmd_output(reference_cmd)
                return o != reference

            # Define a helper function to catch PCI device string
            def _find_pci():
                o = session.cmd_output(find_pci_cmd)
                if re.search(match_string, o, re.IGNORECASE):
                    return True
                else:
                    return False

            error.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                raise error.TestFail("No new PCI device shown after executing "
                                     "monitor command: 'info pci'")

            secs = int(params["wait_secs_for_hook_up"])
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                raise error.TestFail("No new device shown in output of command "
                                     "executed inside the guest: %s" %
                                     reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                raise error.TestFail("New add sr-iov device not found in guest. "
                                     "Command was: %s" % find_pci_cmd)

            # Test the newly added device
            try:
                session.cmd(params["pci_test_cmd"] % (pci_num + 1))
            except aexpect.ShellError, e:
                raise error.TestFail("Check for sr-iov device failed after PCI "
                                     "hotplug. Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise
Code example #21
    def function(self, index):
        """
        Return True/False (good/bad) result of a device functioning
        """
        dev_name = '/dev/' + self.devname(index)
        # Host image path is static known value
        test_data = self.make_image_file_path(index)
        if test_data not in self.test_data_list:
            self.test_data_list.append(test_data)
        byte_size = self.meg * 1024 * 1024
        # Place test data at end of device to also confirm sizing
        offset = byte_size - len(test_data)
        logging.info('Trying to read test data, %dth device %s, '
                     'at offset %d.', index + 1, dev_name, offset)
        session = None

        # Since we know we're going to fail, no sense waiting for the
        # default timeout to expire or login()'s code to get_address
        # (currently 300 seconds) or any other timeout code.  With the
        # guest not alive, just return failure. Not doing so caused a
        # 96 minute pause per test with 16 devices all waiting for
        # the timeouts to occur and probably a day's worth of log messages
        # waiting for something to happen that can't.
        if not self.test_params.main_vm.is_alive():
            logging.debug("VirtualDiskBasic functional test skipping login "
                          "vm is not alive.")
            return False

        try:
            session = self.test_params.main_vm.login()

            # The device may not be ready on guest,
            # just wait at most 5 seconds here
            utils_misc.wait_for(lambda:
                                not session.cmd_status('ls %s' % dev_name), 5)

            # aexpect combines stdout + stderr, throw away stderr
            output = session.cmd_output('tail -c %d %s'
                                        % (len(test_data) + 1, dev_name))
            session.close()
        except (virt_vm.VMAddressError, remote.LoginError,
                aexpect.ExpectError, aexpect.ShellError):
            try:
                session.close()
            except AttributeError:
                pass   # session == None
            logging.debug("VirtualDiskBasic functional test raised an exception")
            return False
        else:
            gotit = [data for data in self.test_data_list if output.strip('\n') in data]
            logging.info("Test data detected in device: %s",
                         gotit)
            if not gotit:
                logging.debug("Expecting: '%s' in %s", test_data, self.test_data_list)
                logging.debug("Received: '%s'", output)
                return False
            else:
                return True
Code example #22
File: iface_options.py Project: nertpinx/tp-libvirt
 def get_guest_ip(session, mac):
     """
     Wrapper function to get guest ip address
     """
     utils_net.restart_guest_network(session, mac)
     # Wait until the IP address is ready
     utils_misc.wait_for(
         lambda: utils_net.get_guest_ip_addr(session, mac), 10)
     return utils_net.get_guest_ip_addr(session, mac)
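
Note: since wait_for() returns the polled function's first true value, the trailing lookup in this wrapper can be folded into the wait. An equivalent sketch:

def get_guest_ip(session, mac):
    """
    Wrapper function to get guest ip address
    """
    utils_net.restart_guest_network(session, mac)
    # wait_for() hands back the first true value returned by the lambda.
    return utils_misc.wait_for(
        lambda: utils_net.get_guest_ip_addr(session, mac), 10)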
Code example #23
def run(test, params, env):
    """
    KVM -no-shutdown flag test:
    1. Boot a guest, with -no-shutdown flag on command line
    2. Run 'system_powerdown' command in monitor
    3. Wait for guest OS to shut down and issue power off to the VM
    4. Run 'system_reset' qemu monitor command
    5. Run 'cont' qemu monitor command
    6. Wait for guest OS to boot up
    7. Repeat step 2-6 for 5 times.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    timeout = int(params.get("login_timeout", 360))
    repeat_times = int(params.get("repeat_times", 5))

    error.base_context("Qemu -no-shutdown test")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    qemu_process_id = vm.get_pid()
    session = vm.wait_for_login(timeout=timeout)
    logging.info("The guest bootup successfully.")

    for i in xrange(repeat_times):
        error.context("Round %s : Send monitor cmd system_powerdown."
                      % str(i + 1), logging.info)
        # Send a system_powerdown monitor command
        vm.monitor.cmd("system_powerdown")
        # Wait for the session to become unresponsive and close it
        if not utils_misc.wait_for(lambda: not session.is_responsive(),
                                   timeout, 0, 1):
            raise error.TestFail("Oops, Guest refuses to go down!")
        if session:
            session.close()
        # Check that the qemu pid has not changed
        if not utils_misc.wait_for(lambda: vm.is_alive(), 5, 0, 1):
            raise error.TestFail("VM not responsive after system_"
                                 "powerdown with -no-shutdown!")
        if vm.get_pid() != qemu_process_id:
            raise error.TestFail("Qemu pid changed after system_powerdown!")
        logging.info("Round %s -> System_powerdown successfully.", str(i + 1))

        # Send monitor command system_reset and cont
        error.context("Round %s : Send monitor command system_reset and cont."
                      % str(i + 1), logging.info)
        vm.monitor.cmd("system_reset")
        vm.resume()

        session = vm.wait_for_login(timeout=timeout)
        logging.info("Round %s -> Guest is up successfully." % str(i + 1))
        if vm.get_pid() != qemu_process_id:
            raise error.TestFail("Qemu pid changed after system_reset & cont!")
    if session:
        session.close()
Code example #24
File: utils_v2v.py Project: jcfaracco/avocado-vt
 def wait_for_x_start(self, timeout=30):
     """
     Wait for X server to start
     """
     cmd = 'xset -q'
     # Exit status 127 means xset is not installed; nothing to wait for.
     if self.run_cmd(cmd)[0] == 127:
         return
     utils_misc.wait_for(lambda: not bool(self.run_cmd(cmd, debug=False)[0]),
                         timeout)
Code example #25
File: balloon_check.py Project: ngu-niny/tp-qemu
 def wait_for_balloon_complete(self, timeout):
     """
     Wait until guest memory doesn't change
     """
     logging.info("Wait until guest memory doesn't change")
     threshold = int(self.params.get("guest_stable_threshold", 100))
     is_stable = self._mem_state(threshold)
     utils_misc.wait_for(lambda: next(is_stable), timeout,
                         step=float(self.params.get("guest_check_step", 10.0)))
Code example #26
def nodedev_create_from_xml(params):
    """
    Create a node device with an XML object.

    :param params: Including nodedev_parent, scsi_wwnn, scsi_wwpn set in xml
    :return: The scsi device name just created
    """
    nodedev_parent = params.get("nodedev_parent")
    scsi_wwnn = params.get("scsi_wwnn")
    scsi_wwpn = params.get("scsi_wwpn")
    status_error = params.get("status_error", "no")
    vhba_xml = NodedevXML()
    vhba_xml.cap_type = 'scsi_host'
    vhba_xml.fc_type = 'fc_host'
    vhba_xml.parent = nodedev_parent
    vhba_xml.wwnn = scsi_wwnn
    vhba_xml.wwpn = scsi_wwpn
    logging.debug("Prepare the nodedev XML: %s", vhba_xml)
    vhba_file = mktemp()
    with open(vhba_file, 'w') as xml_object:
        xml_object.write(str(vhba_xml))

    result = virsh.nodedev_create(vhba_file,
                                  debug=True,
                                  )
    status = result.exit_status

    # Remove temporary file
    os.unlink(vhba_file)

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise exceptions.TestFail("%d not a expected command "
                                      "return value", status)
    elif status_error == "no":
        if status:
            raise exceptions.TestFail(result.stderr)
        else:
            output = result.stdout
            logging.info(output)
            for scsi in output.split():
                if scsi.startswith('scsi_host'):
                    # Check node device
                    utils_misc.wait_for(
                        lambda: check_nodedev(scsi, nodedev_parent),
                        timeout=_DELAY_TIME)
                    if check_nodedev(scsi, nodedev_parent):
                        return scsi
                    else:
                        raise exceptions.TestFail(
                            "XML of vHBA card '%s' is not correct; "
                            "please refer to log errors for detailed info" % scsi)
Code example #27
 def load_stress(self):
     """
     load IO/CPU/Memory stress in guest;
     """
     error.context("launch stress app in guest", logging.info)
     args = (self.test, self.params, self.env, self.params["stress_test"])
     bg_test = utils_test.BackgroundTest(utils_test.run_virt_sub_test, args)
     bg_test.start()
     if not utils_misc.wait_for(bg_test.is_alive, first=10, step=3, timeout=100):
         raise error.TestFail("background test start failed")
     if not utils_misc.wait_for(self.stress_app_running, timeout=360, step=5):
         raise error.TestFail("stress app isn't running")
Code example #28
    def unload_stress(self):
        """
        stop stress app
        """
        def _unload_stress():
            utils.run(self.stop_cmd, ignore_status=True)
            return not self.app_running()

        error.context("stop stress app on host", logging.info)
        utils_misc.wait_for(_unload_stress, first=2.0,
                            text="wait stress app quit", step=1.0, timeout=60)
Code example #29
    def unload_stress(self):
        """
        stop stress app
        """
        def _unload_stress():
            session = self.get_session()
            cmd = self.params.get("stop_cmd")
            session.sendline(cmd)
            return not self.stress_app_running()

        error.context("stop stress app in guest", logging.info)
        utils_misc.wait_for(_unload_stress, first=2.0,
                            text="wait stress app quit", step=1.0, timeout=120)
Code example #30
File: watchdog.py Project: hshl1214/tp-qemu
    def _action_check(session, watchdog_action):
        """
        Check whether or not the watchdog action occurred; raise an error
        if it did not.
        """
        # When the watchdog action is pause, shutdown, reset or poweroff,
        # the vm session will become unresponsive.
        response_timeout = int(params.get("response_timeout", '240'))
        error.context("Check whether or not watchdog action '%s' take effect"
                      % watchdog_action, logging.info)
        if not utils_misc.wait_for(lambda: not session.is_responsive(),
                                   response_timeout, 0, 1):
            if watchdog_action == "none" or watchdog_action == "debug":
                logging.info("OK, the guest session is responsive still")
            else:
                raise error.TestFail(
                    "Oops, seems action '%s' took no effect, "
                    "guest is still responsive" % watchdog_action)

        # When the action is poweroff or shutdown (without the no-shutdown
        # option), the vm dies and qemu exits. For the other actions the vm
        # monitor stays responsive and can report the vm status.
        if (watchdog_action == "poweroff" or (watchdog_action == "shutdown" and
                                              params.get("disable_shutdown") != "yes")):
            if not utils_misc.wait_for(lambda: vm.is_dead(),
                                       response_timeout, 0, 1):
                raise error.TestFail(
                    "Oops, seems '%s' action took no effect, "
                    "guest is still alive!" % watchdog_action)
        else:
            if watchdog_action == "pause":
                f_param = "paused"
            elif watchdog_action == "shutdown":
                f_param = "shutdown"
            else:
                f_param = "running"

            if not utils_misc.wait_for(
                    lambda: vm.monitor.verify_status(f_param),
                    response_timeout, 0, 1):
                logging.debug("Monitor status is: %s", vm.monitor.get_status())
                raise error.TestFail(
                    "Oops, seems action '%s' took no effect, "
                    "wrong monitor status!" % watchdog_action)

        # When the action is reset, we need to be able to log back into the guest.
        if watchdog_action == "reset":
            logging.info("Try to login the guest after reboot")
            vm.wait_for_login(timeout=relogin_timeout)
        logging.info("Watchdog action '%s' come into effect." %
                     watchdog_action)
Code example #31
def run(test, params, env):
    """
    Test block devices with migration.

        Scenario "with_scsi_on2off":
            1) Boot guest with scsi=on on src host.
            2) Boot guest with scsi=off on dst host.
            3) Do live Migration.

        Scenario "with_change_cdrom":
            1) Run qemu with specified cdrom loaded.
            2) Check the cdrom info by qmp.
            3) Check the cdrom's size inside guest.
            4) Eject cdrom, and check the info again.
            5) Load a new cdrom image, and check the cdrom info again.
            6) Check the cdrom's size inside guest.
            7) Start dest vm with new iso file in listening mode.
            8) Migrate from src to dst.
            9) Do system_reset in dst vm.

        Scenario "with_dataplane_on2off":
            1) Start VM with dataplane (both system disk and data disk).
            2) For Windows: check whether viostor.sys verifier enabled in guest.
            3) Do live migration.
            4) Do iozone testing after migration.
            5) Reboot guest.

        Scenario "with_post_copy.with_mem_stress":
            1) Start source VM with virtio-scsi-pci (system and data disks)
            2) For Windows: check whether viostor.sys verifier enabled in guest.
            3) Run stress guest.
            4) Start dst guest with "-incoming tcp:x:xxxx".
            5) On source qemu & dst qemu, set postcopy mode on.
            6) Do live migration.
            7) Migration cannot finish under high stress,
               so change into postcopy mode.
            8) Repeat step 4~7 to migrate guest back to source host.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    class _StressThread(threading.Thread):
        def __init__(self, target, exit_event, args=()):
            threading.Thread.__init__(self)
            self.exc_info = None
            self.exit_event = exit_event
            self._target = target
            self._args = args

        def run(self):
            try:
                self._target(*self._args)
            except Exception as e:
                logging.error(str(e))
                self.exc_info = sys.exc_info()
                self.exit_event.set()

    def scp_package(src, dst):
        """ Copy file from the host to the guest. """
        scp_to_remote(vm.get_address(), '22', params.get('username'),
                      params.get('password'), src, dst)

    def unpack_package(session, src, dst):
        """ Unpack the package. """
        session.cmd('tar -xvf %s -C %s' % (src, dst))

    def install_package(session, src, dst):
        """ Install the package. """
        cmd = ' && '.join(
            ("cd %s && ./configure --prefix=%s", "make && make install"))
        session.cmd(cmd % (src, dst), 300)

    def cleanup(session, src):
        """ Remove files. """
        session.cmd('rm -rf %s' % src)

    def _get_data_disks_linux():
        """ Get the data disks by serial or wwn options in linux. """
        for data_image in params['images'].split()[1:]:
            extra_params = params.get("blk_extra_params_%s" % data_image, '')
            match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M)
            if match:
                drive_id = match.group(2)
            else:
                continue
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if not drive_path:
                test.error("Failed to get '%s' drive path" % data_image)
            yield drive_path[5:], params.object_params(
                data_image)['image_size']

    def _get_data_disks_win():
        """ Get the data disks in windows. """
        for data_image in params['images'].split()[1:]:
            size = params.object_params(data_image)['image_size']
            yield utils_disk.get_windows_disks_index(session, size)[0], size

    def get_data_disks():
        """ Get the data disks. """
        _get_disks = _get_data_disks_win if windows else _get_data_disks_linux
        for disk, size in _get_disks():
            yield disk, size

    def format_data_disks():
        """ Format the data disks. """
        for disk, size in get_data_disks():
            if windows:
                if not utils_disk.update_windows_disk_attributes(
                        session, disk):
                    test.fail("Failed to update windows disk attributes.")
            yield utils_disk.configure_empty_disk(session, disk, size,
                                                  os_type)[0]

    def run_iozone(timeout):
        """ Do iozone testing inside guest. """
        logging.info("Do iozone testing on data disks.")
        iozone = generate_instance(params, vm, 'iozone')
        try:
            for target in format_data_disks():
                iozone.run(stress_options.format(target), timeout)
        finally:
            iozone.clean()

    def run_stressapptest(timeout):
        """ Do stressapptest testing inside guest. """
        logging.info("Do stressapptest testing on data disks.")
        sub_session = vm.wait_for_login(timeout=360)
        try:
            host_path = os.path.join(data_dir.get_deps_dir('stress'),
                                     'stressapptest.tar')
            scp_package(host_path, '/home/')
            unpack_package(sub_session, '/home/stressapptest.tar', '/home')
            install_package(sub_session, '/home/stressapptest',
                            '/home/stressapptest')
            stress_bin_path = '/home/stressapptest/bin/stressapptest'
            sub_session.cmd('{} {}'.format(stress_bin_path, stress_options),
                            timeout)
        finally:
            cleanup(sub_session, '/home/stressapptest*')
            sub_session.close()

    def run_stress_background(timeout):
        """ Run stress inside guest. """
        thread = _StressThread(stress_maps[stress_name], exit_event,
                               (timeout, ))
        thread.start()
        return thread

    def get_cdrom_size():
        """ Get the size of cdrom device inside guest. """
        error_context.context("Get the cdrom's size in guest.", logging.info)
        cmd = params["check_size"]
        if not utils_misc.wait_for(
                lambda: re.search(r'(\d+)', session.cmd(cmd), re.M), 10):
            test.fail('Failed to get the cdrom\'s size.')
        cdrom_size = re.search(r'(\d+)', session.cmd(cmd), re.M).group(1)
        cdrom_size = int(cdrom_size) * 512 if not windows else int(cdrom_size)
        logging.info("The cdrom's size is %s in guest.", cdrom_size)
        return cdrom_size

    def get_iso_size(iso_file):
        """ Get the size of iso on host."""
        error_context.context("Get the iso size on host.", logging.info)
        return int(
            process.system_output('ls -l %s | awk \'{print $5}\'' % iso_file,
                                  shell=True).decode())

    def compare_cdrom_size(iso_file):
        """ Compare the cdrom's size between host and guest. """
        error_context.context(
            "Compare the cdrom's size between host and guest.", logging.info)
        iso_size = get_iso_size(iso_file)
        if not utils_misc.wait_for(
                lambda: get_cdrom_size() == iso_size, 30, step=3):
            test.fail(
                'The size inside guest is not equal to iso size on host.')
        return get_cdrom_size()

    def check_cdrom_info_by_qmp(check_items):
        """ Check the cdrom device info by qmp. """
        error_context.context(
            'Check if the info "%s" matches the output of query-block.'
            % str(check_items), logging.info)
        blocks = vm.monitor.info_block()
        for key, val in check_items.items():
            if (key == 'device'
                    and val == dev_id) or blocks[dev_id][key] == val:
                continue
            test.fail('No such \"%s: %s\" in the output of query-block.' %
                      (key, val))

    def check_block(block):
        """ Check if the block device is existed in query-block."""
        return block in str(vm.monitor.info("block"))

    def eject_cdrom():
        """ Eject cdrom. """
        error_context.context("Eject the original device.", logging.info)
        with eject_check:
            vm.eject_cdrom(device_name, True)
        if check_block(orig_img_name):
            test.fail("Failed to eject cdrom %s. " % orig_img_name)

    def change_cdrom():
        """ Change cdrom. """
        new_img_name = params["cdrom_new_file"]
        error_context.context("Insert new image to device.", logging.info)
        with change_check:
            vm.change_media(device_name, new_img_name)
        if not check_block(new_img_name):
            test.fail("Fail to change cdrom to %s." % new_img_name)

    def change_vm_power():
        """ Change the vm power. """
        method, command = params['command_opts'].split(',')
        logging.info('Sending command(%s): %s' % (method, command))
        if method == 'shell':
            p_session = vm.wait_for_login(timeout=360)
            p_session.sendline(command)
            p_session.close()
        else:
            getattr(vm.monitor, command)()

    def check_vm_status(timeout=600):
        """ Check the status of vm. """
        action = 'shutdown' if shutdown_vm else 'login'
        if not getattr(vm, 'wait_for_%s' % action)(timeout=timeout):
            test.fail('Failed to %s vm.' % action)

    def set_dst_params():
        """ Set the params of dst vm. """
        for name, val in ast.literal_eval(params.get('set_dst_params',
                                                     '{}')).items():
            vm.params[name] = val

    def ping_pong_migration(repeat_times):
        """ Do ping pong migration. """
        for i in range(repeat_times):
            set_dst_params()
            if i % 2 == 0:
                logging.info("Round %s ping..." % str(i // 2))
            else:
                logging.info("Round %s pong..." % str(i // 2))
            if do_migration_background:
                args = (mig_timeout, mig_protocol, mig_cancel_delay)
                kwargs = {
                    'migrate_capabilities': capabilities,
                    'mig_inner_funcs': inner_funcs,
                    'env': env
                }
                migration_thread = utils_misc.InterruptedThread(
                    vm.migrate, args, kwargs)
                migration_thread.start()
                if not utils_misc.wait_for(
                        lambda: (bool(vm.monitor.query("migrate")) and
                                 vm.monitor.query("migrate")['status'] !=
                                 'completed'),
                        30):
                    test.error('Migration thread is not alive.')
                vm.monitor.wait_for_migrate_progress(
                    float(params['percent_start_post_copy']))
                vm.monitor.migrate_start_postcopy()
                migration_thread.join()
                logging.info('Migration thread is done.')
            else:
                vm.migrate(mig_timeout,
                           mig_protocol,
                           mig_cancel_delay,
                           migrate_capabilities=capabilities,
                           mig_inner_funcs=inner_funcs,
                           env=env)

    def bg_stress_is_alive(session, name):
        """ Check whether the background stress is alive. """
        return session.cmd_output('pgrep -xl %s' % name)

    shutdown_vm = params.get('shutdown_vm', 'no') == 'yes'
    reboot = params.get('reboot_vm', 'no') == 'yes'
    with_cdrom = params.get('with_cdrom', 'no') == 'yes'
    os_type = params['os_type']
    windows = os_type == 'windows'
    src_desc = params.get('src_addition_desc', '')
    dst_desc = params.get('dst_addition_desc', '')

    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    inner_funcs = ast.literal_eval(params.get("migrate_inner_funcs", "[]"))
    capabilities = ast.literal_eval(params.get("migrate_capabilities", "{}"))
    do_migration_background = params.get('do_migration_background',
                                         'no') == 'yes'

    stress_name = params.get('stress_name')
    stress_maps = {'iozone': run_iozone, 'stressapptest': run_stressapptest}
    stress_options = params.get('stress_options')
    stress_timeout = int(params.get('stress_timeout', '1800'))
    do_stress_background = params.get('do_stress_background', 'no') == 'yes'
    kill_bg_stress = params.get('kill_bg_stress', 'no') == 'yes'

    exit_event = threading.Event()

    error_context.context('Boot guest %s on src host.' % src_desc,
                          logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=360)
    if windows:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, params["driver_name"])

    if params.get('run_stress_before_migration', 'no') == 'yes':
        if do_stress_background:
            stress_thread = run_stress_background(stress_timeout)
            if not utils_misc.wait_for(
                    lambda: (stress_thread.exit_event.is_set() or
                             bg_stress_is_alive(session, stress_name)),
                    120,
                    step=3):
                test.error('The %s is not alive.' % stress_name)
            if stress_thread.exit_event.is_set():
                stress_thread.exit_event.clear()
                six.reraise(stress_thread.exc_info[0],
                            stress_thread.exc_info[1],
                            stress_thread.exc_info[2])
        else:
            stress_maps[stress_name](stress_timeout)

    if with_cdrom:
        cdrom_params = params.object_params(params['cdroms'])
        check_orig_items = ast.literal_eval(cdrom_params['check_orig_items'])
        dev_id = check_orig_items['device']
        check_cdrom_info_by_qmp(check_orig_items)
        orig_size = compare_cdrom_size(params['cdrom_orig_file'])

        orig_img_name = params["cdrom_orig_file"]
        device_name = vm.get_block({"file": orig_img_name})
        if device_name is None:
            test.fail("Failed to get device using image %s." % orig_img_name)

        eject_check = QMPEventCheckCDEject(vm, device_name)
        change_check = QMPEventCheckCDChange(vm, device_name)
        eject_cdrom()
        change_cdrom()

        check_new_items = ast.literal_eval(cdrom_params['check_new_items'])
        check_cdrom_info_by_qmp(check_new_items)
        new_size = compare_cdrom_size(params['cdrom_new_file'])
        if new_size == orig_size:
            test.fail(
                'The new size inside guest is equal to the orig iso size.')

    error_context.context('Boot guest %s on dst host.' % dst_desc,
                          logging.info)
    ping_pong_migration(int(params.get('repeat_ping_pong', '1')))

    if params.get('run_stress_after_migration', 'no') == 'yes':
        if do_stress_background:
            run_stress_background(stress_timeout)
        else:
            stress_maps[stress_name](stress_timeout)

    if do_stress_background:
        if bg_stress_is_alive(session, stress_name):
            if kill_bg_stress:
                session.cmd('killall %s' % stress_name)
            else:
                stress_thread.join(stress_timeout)
                if stress_thread.exit_event.is_set():
                    stress_thread.exit_event.clear()
                    six.reraise(stress_thread.exc_info[0],
                                stress_thread.exc_info[1],
                                stress_thread.exc_info[2])

    if shutdown_vm or reboot:
        change_vm_power()
        check_vm_status()
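
Note: _StressThread stores sys.exc_info() so the main thread can re-raise the worker's exception via six.reraise(). On Python 3 alone, the same re-raise could be written as (equivalent sketch):

exc_type, exc_value, exc_tb = stress_thread.exc_info
raise exc_value.with_traceback(exc_tb)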
Code example #32
File: jumbo.py Project: kmaehara/virt-test
def run_jumbo(test, params, env):
    """
    Test the RX jumbo frame function of vnics:

    1) Boot the VM.
    2) Change the MTU of guest nics and host taps depending on the NIC model.
    3) Add the static ARP entry for guest NIC.
    4) Wait for the MTU ok.
    5) Verify the path MTU using ping.
    6) Ping the guest with large frames.
    7) Increment size ping.
    8) Flood ping the guest with large frames.
    9) Verify the path MTU.
    10) Recover the MTU.

    @param test: QEMU test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    timeout = int(params.get("login_timeout", 360))
    mtu = params.get("mtu", "1500")
    max_icmp_pkt_size = int(mtu) - 28
    flood_time = params.get("flood_time", "300")
    os_type = params.get("os_type")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    session_serial = vm.wait_for_serial_login(timeout=timeout)

    ifname = vm.get_ifname(0)
    ip = vm.get_address(0)
    if ip is None:
        raise error.TestError("Could not get the IP address")

    try:
        error.context("Changing the MTU of guest", logging.info)
        # Environment preparation
        mac = vm.get_mac_address(0)
        if os_type == "linux":
            ethname = utils_test.get_linux_ifname(session, mac)
            guest_mtu_cmd = "ifconfig %s mtu %s" % (ethname, mtu)
        else:
            connection_id = utils_net.get_windows_nic_attribute(
                session, "macaddress", mac, "netconnectionid")

            index = utils_net.get_windows_nic_attribute(
                session, "netconnectionid", connection_id, "index")
            reg_set_mtu_pattern = params.get("reg_mtu_cmd")
            mtu_key_word = params.get("mtu_key", "MTU")
            reg_set_mtu = reg_set_mtu_pattern % (int(index), mtu_key_word,
                                                 int(mtu))
            guest_mtu_cmd = "%s " % reg_set_mtu

        session.cmd(guest_mtu_cmd)
        if os_type == "windows":
            utils_net.restart_windows_guest_network(session_serial,
                                                    connection_id)

        error.context("Chaning the MTU of host tap ...", logging.info)
        host_mtu_cmd = "ifconfig %s mtu %s" % (ifname, mtu)
        utils.run(host_mtu_cmd)

        error.context("Add a temporary static ARP entry ...", logging.info)
        arp_add_cmd = "arp -s %s %s -i %s" % (ip, mac, ifname)
        utils.run(arp_add_cmd)

        def is_mtu_ok():
            s, _ = utils_test.ping(ip,
                                   1,
                                   interface=ifname,
                                   packetsize=max_icmp_pkt_size,
                                   hint="do",
                                   timeout=2)
            return s == 0

        def verify_mtu():
            logging.info("Verify the path MTU")
            s, o = utils_test.ping(ip,
                                   10,
                                   interface=ifname,
                                   packetsize=max_icmp_pkt_size,
                                   hint="do",
                                   timeout=15)
            if s != 0:
                logging.error(o)
                raise error.TestFail("Path MTU is not as expected")
            if utils_test.get_loss_ratio(o) != 0:
                logging.error(o)
                raise error.TestFail("Packet loss ratio during MTU "
                                     "verification is not zero")

        def flood_ping():
            logging.info("Flood with large frames")
            utils_test.ping(ip,
                            interface=ifname,
                            packetsize=max_icmp_pkt_size,
                            flood=True,
                            timeout=float(flood_time))

        def large_frame_ping(count=100):
            logging.info("Large frame ping")
            _, o = utils_test.ping(ip,
                                   count,
                                   interface=ifname,
                                   packetsize=max_icmp_pkt_size,
                                   timeout=float(count) * 2)
            ratio = utils_test.get_loss_ratio(o)
            if ratio != 0:
                raise error.TestFail("Loss ratio of large frame ping is %s" %
                                     ratio)

        def size_increase_ping(step=random.randrange(90, 110)):
            logging.info("Size increase ping")
            for size in range(0, max_icmp_pkt_size + 1, step):
                logging.info("Ping %s with size %s", ip, size)
                s, o = utils_test.ping(ip,
                                       1,
                                       interface=ifname,
                                       packetsize=size,
                                       hint="do",
                                       timeout=1)
                if s != 0:
                    s, o = utils_test.ping(ip,
                                           10,
                                           interface=ifname,
                                           packetsize=size,
                                           adaptive=True,
                                           hint="do",
                                           timeout=20)

                    fail_ratio = int(params.get("fail_ratio", 50))
                    if utils_test.get_loss_ratio(o) > fail_ratio:
                        raise error.TestFail("Ping loss ratio is greater than "
                                             "%s%% for size %s"
                                             % (fail_ratio, size))

        logging.info("Waiting for the MTU to be OK")
        wait_mtu_ok = 10
        if not utils_misc.wait_for(is_mtu_ok, wait_mtu_ok, 0, 1):
            logging.debug(commands.getoutput("ifconfig -a"))
            raise error.TestError("MTU is not as expected even after %s "
                                  "seconds" % wait_mtu_ok)

        # Functional Test
        error.context("Checking whether MTU change is ok", logging.info)
        verify_mtu()
        large_frame_ping()
        size_increase_ping()

        # Stress test
        flood_ping()
        verify_mtu()

    finally:
        # Environment clean
        if session:
            session.close()
        logging.info("Removing the temporary ARP entry")
        utils.run("arp -d %s -i %s" % (ip, ifname))
Code example #33
0
def run(test, params, env):
    """
    Test native TLS encryption on chardev TCP transports
    Scenario 1:
        a. Run gnutls server
        b. Launch QEMU with a serial port as TLS client
        c. Check the server endpoint output
    Scenario 2:
        a. Launch QEMU with a serial port as TLS server
        b. Run gnutls client to connect TLS server
        c. Check the client endpoint output
    Scenario 3:
        a. Launch QEMU with a serial port as TLS server
        b. Execute 'cat /dev/ttyS0' in guest which boot from step 1
        c. Launch QEMU with a serial port as TLS client
        d. Check the output of step b
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clean_cmd = params["clean_cmd"]
    try:
        pkgs = params.objects("depends_pkgs")
        if not utils_package.package_install(pkgs):
            test.error("Install dependency packages failed")
        setup_certs(params)
        expected_msg = params["expected_msg"]
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        port = str(utils_misc.find_free_ports(5000, 9999, 1, hostname)[0])

        # Scenario 1
        gnutls_cmd_server = params.get("gnutls_cmd_server")
        if gnutls_cmd_server:
            gnutls_cmd_server = gnutls_cmd_server % port
            params["extra_params"] = params["extra_params"] % (hostname, port)
            error_context.context("Run gnutls server ...", logging.info)
            tls_server = aexpect.run_bg(gnutls_cmd_server)
            params['start_vm'] = 'yes'
            vm_name = params['main_vm']
            error_context.context(
                "Launch QEMU with a serial port as TLS client", logging.info)
            env_process.preprocess_vm(test, params, env, vm_name)
            if not utils_misc.wait_for(
                    lambda: expected_msg in tls_server.get_output(),
                    first=5,
                    timeout=15):
                test.fail("TLS server can't connect client succssfully.")

        # Scenario 2
        gnutls_cmd_client = params.get("gnutls_cmd_client")
        if gnutls_cmd_client:
            gnutls_cmd_client = gnutls_cmd_client % (port, hostname)
            params["extra_params"] = params["extra_params"] % (hostname, port)
            params['start_vm'] = 'yes'
            vm_name = params['main_vm']
            error_context.context(
                "Launch QEMU with a serial port as TLS server", logging.info)
            env_process.preprocess_vm(test, params, env, vm_name)
            error_context.context("Run gnutls client to connect TLS server",
                                  logging.info)
            tls_client = aexpect.run_bg(gnutls_cmd_client)
            if not utils_misc.wait_for(
                    lambda: expected_msg in tls_client.get_output(),
                    first=5,
                    timeout=15):
                test.fail("TLS client can't connect server succssfully.")

        # Scenario 3:
        guest_cmd = params.get("guest_cmd")
        if guest_cmd:
            params["start_vm"] = "yes"
            vms = params.get("vms").split()
            params["extra_params"] = params["extra_params_%s" %
                                            vms[0]] % (hostname, port)
            error_context.context(
                "Launch QEMU with a serial port as TLS server", logging.info)
            env_process.preprocess_vm(test, params, env, vms[0])
            vm1 = env.get_vm(vms[0])
            session_vm1 = vm1.wait_for_login()
            session_vm1.cmd(guest_cmd)
            params["extra_params"] = params["extra_params_%s" %
                                            vms[1]] % (hostname, port)
            error_context.context(
                "Launch QEMU with a serial port as TLS client", logging.info)
            env_process.preprocess_vm(test, params, env, vms[1])
            try:
                session_vm1.read_until_output_matches([expected_msg],
                                                      timeout=15)
            except aexpect.ExpectError:
                test.fail("Can't connect TLS client inside TLS server guest.")
            vm2 = env.get_vm(vms[1])
            session_vm1.close()
            vm1.destroy()
            vm2.destroy()
    finally:
        gnutls_pid = process.getoutput("pgrep -f gnutls", shell=True)
        if gnutls_pid:
            process.run("pkill -9 gnutls")
        process.run(clean_cmd)
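
The extra_params templates substituted above come from the test configuration
and are not shown in this listing. A plausible shape for the TLS-client side,
assuming the certificates generated by setup_certs() live under
/etc/pki/qemu-chardev (directory and ids are illustrative, not the test's
actual values):

# Hypothetical extra_params template; the '%s' slots take (hostname, port),
# matching the substitution done in the test body.
extra_params = (
    "-object tls-creds-x509,id=tls0,dir=/etc/pki/qemu-chardev,"
    "endpoint=client,verify-peer=yes "
    "-chardev socket,id=serial0,host=%s,port=%s,tls-creds=tls0 "
    "-device isa-serial,chardev=serial0")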
Code example #34
0
    global _unattended_server_thread_event
    if _unattended_server_thread is not None:
        _unattended_server_thread_event.set()
        _unattended_server_thread.join(3)
        _unattended_server_thread = None

    global _syslog_server_thread
    global _syslog_server_thread_event
    if _syslog_server_thread is not None:
        _syslog_server_thread_event.set()
        _syslog_server_thread.join(3)
        _syslog_server_thread = None

    time_elapsed = time.time() - start_time
    logging.info("Guest reported successful installation after %d s (%d min)",
                 time_elapsed, time_elapsed / 60)

    if params.get("shutdown_cleanly", "yes") == "yes":
        shutdown_cleanly_timeout = int(
            params.get("shutdown_cleanly_timeout", 120))
        logging.info("Wait for guest to shutdown cleanly")
        if params.get("medium", "cdrom") == "import":
            vm.shutdown()
        try:
            if utils_misc.wait_for(vm.is_dead, shutdown_cleanly_timeout, 1, 1):
                logging.info("Guest managed to shutdown cleanly")
        except qemu_monitor.MonitorError as e:
            logging.warning(
                "Guest apparently shut down, but got a "
                "monitor error: %s", e)
Code example #35
0
File: watchdog.py Project: zhenyzha/tp-libvirt
def run(test, params, env):
    """
    Test watchdog device:

    1.Add watchdog device to the guest xml.
    2.Start the guest.
    3.Trigger the watchdog in the guest.
    4.Confirm the guest status.
    """
    def trigger_watchdog(model):
        """
        Trigger watchdog

        :param model: action when watchdog triggered
        """
        watchdog_device = "device %s" % model
        if action == "dump":
            watchdog_action = "watchdog-action pause"
        else:
            watchdog_action = "watchdog-action %s" % action
        if not hotplug_test:
            vm_pid = vm.get_pid()
            with open("/proc/%s/cmdline" % vm_pid) as vm_cmdline_file:
                vm_cmdline = vm_cmdline_file.read()
                vm_cmdline = vm_cmdline.replace('\x00', ' ')
                if not all(option in vm_cmdline
                           for option in (watchdog_device, watchdog_action)):
                    test.fail("Can not find %s or %s in qemu cmd line" %
                              (watchdog_device, watchdog_action))
        cmd = "gsettings set org.gnome.settings-daemon.plugins.power button-power shutdown"
        session.cmd(cmd, ignore_all_errors=True)
        try:
            try_modprobe(model, session, test)
            logging.info("dmesg watchdog messages: %s" % session.cmd(
                "dmesg | grep -i %s" % model, ignore_all_errors=True))
            session.cmd("lsmod | grep %s" % model)
            session.cmd("echo 1 > /dev/watchdog")
        except aexpect.ShellCmdError as e:
            session.close()
            test.fail("Failed to trigger watchdog: %s" % e)

    def try_modprobe(model, session, test):
        """
        Tries to load watchdog kernel module, fails test on error
        :param model: watchdog model, e.g. diag288
        :param session: guest session to run command
        :param test: test object
        :return: None
        """
        handled_types = {"ib700": "ib700wdt", "diag288": "diag288_wdt"}
        if model not in handled_types.keys():
            return
        module = handled_types.get(model)
        try:
            session.cmd("modprobe %s" % module)
        except aexpect.ShellCmdError:
            session.close()
            test.fail("Failed to load module %s" % module)

    def watchdog_attached(vm_name):
        """
        Confirm whether watchdog device is attached to vm by checking domain dumpxml

        :param vm_name: vm name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if vmxml.xmltreefile.find('devices/watchdog'):
            return True
        else:
            return False

    def confirm_guest_status():
        """
        Confirm the guest status after watchdog triggered
        """
        def _booting_completed():
            session = vm.wait_for_login()
            status = None
            second_boot_time = None
            try:
                status, second_boot_time = session.cmd_status_output(
                    "uptime --since")
                logging.debug("The second boot time is %s", second_boot_time)
            except (aexpect.ShellStatusError,
                    aexpect.ShellProcessTerminatedError) as e:
                logging.error("Exception caught:%s", e)

            session.close()
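            # 'uptime --since' prints an ISO-8601 timestamp, so plain string
            # comparison follows chronological order here.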
            return second_boot_time > first_boot_time

        def _inject_nmi():
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s",
                              dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180,
                                       10):
                test.fail("Guest not shutdown after watchdog triggered")
            else:
                logging.debug(
                    "Guest is in shutdown state after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
            else:
                logging.debug("Guest is rebooted after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                logging.debug(
                    "Guest is in paused status after watchdog triggered.")
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail(
                        "The domstate is not correct after dump by watchdog")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none":
            if utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest shutdown unexpectedly")
            else:
                logging.debug(
                    "Guest is not in shutoff state since watchdog action is none."
                )
        elif action == "inject-nmi":
            if model != "diag288" and not utils_misc.wait_for(
                    _inject_nmi, 180, 10):
                test.fail(
                    "Guest not receive inject-nmi after watchdog triggered\n")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            else:
                logging.debug(
                    "Guest received inject-nmi and inject-nmi watchdog event "
                    " has been caught.")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(
                    lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail(
                    "No auto core dump file found after watchdog triggered")
            else:
                logging.debug(
                    "VM core has been dumped after watchdog triggered.")

    name_length = params.get("name_length", "default")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    model = params.get("model")
    action = params.get("action")
    model_test = params.get("model_test") == "yes"
    hotplug_test = params.get("hotplug_test") == "yes"
    hotunplug_test = params.get("hotunplug_test") == "yes"
    machine_type = params.get("machine_type")

    if machine_type == "q35" and model == "ib700":
        test.cancel("ib700wdt watchdog device is not supported "
                    "on guest with q35 machine type")

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Rename the guest name to the length defined in the config file
    if name_length != "default":
        origin_name = vm_name
        name_length = int(params.get("name_length", "1"))
        vm_name = ''.join([
            random.choice(string.ascii_letters + string.digits)
            for _ in range(name_length)
        ])
        vm_xml.VMXML.vm_rename(vm, vm_name)
        # Generate the renamed xml file
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Coldplug pcie-to-pci-bridge to vm xml for q35 guest as i6300esb watchdog
    # device can only be plugged to conventional PCI slot
    if (machine_type == 'q35'
            and not vmxml.get_controllers(controller_type='pci',
                                          model='pcie-to-pci-bridge')):
        logging.debug(
            "Add pcie-root-port and pcie-to-pci-bridge controller to vm")
        pcie_root_port = Controller("pci")
        pcie_pci_bridge = Controller("pci")
        pcie_root_port.model = "pcie-root-port"
        pcie_pci_bridge.model = "pcie-to-pci-bridge"
        pcie_root_port.model_name = {'name': 'pcie-root-port'}
        pcie_pci_bridge.model_name = {'name': 'pcie-pci-bridge'}
        vmxml.add_device(pcie_root_port)
        vmxml.add_device(pcie_pci_bridge)
        vmxml.sync()

    if hotplug_test:
        vm.start()
        session = vm.wait_for_login()

    # Add watchdog device to domain
    vmxml.remove_all_device_by_type('watchdog')
    watchdog_dev = Watchdog()
    watchdog_dev.model_type = model
    watchdog_dev.action = action
    chars = string.ascii_letters + string.digits + '-_'
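    # libvirt only accepts user-defined device aliases that carry the 'ua-' prefix.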
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
    watchdog_dev.alias = {'name': alias_name}

    try:
        if model_test or hotunplug_test:
            vmxml.add_device(watchdog_dev)
            vmxml.sync()
            try:
                vm.start()
            except Exception:
                test.fail("VM startup after adding watchdog device failed!")

        elif hotplug_test:
            watchdog_xml = watchdog_dev.xml
            attach_result = virsh.attach_device(vm_name,
                                                watchdog_xml,
                                                ignore_status=False,
                                                debug=True)
            if not utils_misc.wait_for(lambda: watchdog_attached(vm.name), 60):
                test.fail("Failed to hotplug watchdog device.")
        session = vm.wait_for_login()

        # No need to trigger watchdog after hotunplug
        if hotunplug_test:
            cur_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            cur_watchdog = cur_xml.xmltreefile.find('devices/watchdog')
            cur_watchdog_xml = Watchdog.new_from_element(cur_watchdog).xml
            detach_result = virsh.detach_device(vm_name,
                                                cur_watchdog_xml,
                                                ignore_status=True,
                                                debug=True)
            if detach_result.exit_status:
                test.fail(
                    "i6300esb watchdog device can NOT be detached successfully, "
                    "result:\n%s" % detach_result)
            elif not utils_misc.wait_for(
                    lambda: not watchdog_attached(vm.name), 60):
                test.fail("Failed to hotunplug watchdog device.")
            return

        if action == "reset":
            status, first_boot_time = session.cmd_status_output(
                "uptime --since")
            logging.info("The first boot time is %s\n", first_boot_time)
        if action == "inject-nmi":
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            event_cmd = "event --event watchdog --all --loop"
            virsh_session.sendline(event_cmd)
        trigger_watchdog(model)
        confirm_guest_status()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if name_length != "default":
            vm_xml.VMXML.vm_rename(vm, origin_name)
        backup_xml.sync()
Code example #36
0
    def add_device(pci_num):
        reference_cmd = params["reference_cmd"]
        find_pci_cmd = params["find_pci_cmd"]
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)
        active_nics = get_active_network_device(session, nic_filter)
        try:
            # get function for adding device.
            add_function = local_functions["%s_iov" % cmd_type]
        except Exception:
            raise error.TestError(
                "No function for adding sr-iov dev with '%s'" % cmd_type)
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                output = session.cmd_output(reference_cmd)
                return output != reference

            # Define a helper function to make sure new nic could get ip.
            def _check_ip():
                post_nics = get_active_network_device(session, nic_filter)
                return (len(active_nics) <= len(post_nics)
                        and active_nics != post_nics)

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output(find_pci_cmd)
                if re.search(match_string, output, re.IGNORECASE):
                    return True
                else:
                    return False

            error.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                raise error.TestFail("No new PCI device shown after executing "
                                     "monitor command: 'info pci'")

            secs = int(params["wait_secs_for_hook_up"])
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                raise error.TestFail(
                    "No new device shown in output of command "
                    "executed inside the guest: %s" % reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                raise error.TestFail("New add device not found in guest. "
                                     "Command was: %s" % find_pci_cmd)

            # Test the newly added device
            if not utils_misc.wait_for(_check_ip, 30, 3, 3):
                ifconfig = session.cmd_output("ifconfig -a")
                raise error.TestFail("New hotpluged device could not get ip "
                                     "after 30s in guest. guest ifconfig "
                                     "output: \n%s" % ifconfig)
            try:
                session.cmd(params["pci_test_cmd"] % (pci_num + 1))
            except aexpect.ShellError as e:
                raise error.TestFail("Check device failed after PCI "
                                     "hotplug. Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise
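
The local_functions table and the *_iov helpers referenced above are defined
outside this excerpt. A minimal sketch of what a device_add-based helper could
look like, assuming an avocado-vt monitor object and a hypothetical VF host
address:

    def device_add_iov(pci_num):
        """Hypothetical helper: hot-plug one VF via the monitor's device_add."""
        dev_id = "hotplug_vf_%d" % pci_num
        # 0000:03:10.x is a placeholder for a real SR-IOV virtual function.
        vm.monitor.send_args_cmd("device_add driver=vfio-pci,"
                                 "host=0000:03:10.%d,id=%s" % (pci_num, dev_id))
        return vm.monitor.info("pci")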
Code example #37
0
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5 for each backup round
    7. check the full/incremental backup file data
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("loal_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt support related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            # Just make sure the guest is fully booted; no session needed yet
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {
                                "type": "passphrase",
                                "uuid": luks_secret_uuid
                            }
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Create some data in vdb
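            # Each round writes 1MiB at a distinct 10MiB-spaced offset, so
            # every incremental backup has fresh dirty extents to capture.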
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_export": nbd_export_name
            }
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(
                        nbd_params, backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if scratch file encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and "kill_qemu" in error_operation:
            clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)

        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()

        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
Code example #38
0
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif domid and vm_ref == "hex_id":
        vm_ref = hex(int(domid))

    try:
        # Check whether qemu-guest-agent is active in guest
        session = vm.wait_for_login()

        def verify_alive():
            return utils_misc.get_guest_service_status(
                    session, 'qemu-guest-agent') == 'active'
        if not utils_misc.wait_for(verify_alive, 30):
            test.error('Service "qemu-guest-agent" is not active')
        # Run virsh command
        cmd_result = virsh.qemu_agent_command(vm_ref, cmd, options,
                                              ignore_status=True,
                                              debug=True)
        status = cmd_result.exit_status

        # Check result
        if not libvirtd_inst.is_running():
            raise error.TestFail("Libvirtd is not running after run command.")
        if status_error:
            if not status:
                # Bug 853673
                err_msg = "Expect fail but run successfully, please check Bug: "
                err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id=853673"
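
The cmd and options values are supplied by the test configuration, and this
excerpt is cut off before they are shown. A typical invocation shape, using
the real 'guest-info' guest-agent command purely as an illustration:

result = virsh.qemu_agent_command("avocado-vt-vm1",
                                  '{"execute": "guest-info"}',
                                  "--timeout 10",
                                  ignore_status=True, debug=True)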
Code example #39
0
def run(test, params, env):
    """
    Test command: virsh domif-setlink and domif-getlink.

    The command sets and gets the link state of a virtual interface.
    1. Prepare test environment.
    2. Perform virsh domif-setlink and domif-getlink operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    def domif_setlink(vm, device, operation, options):
        """
        Set the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param operation : domain virtual interface state
        :param options : some options like --config

        """

        return virsh.domif_setlink(vm, device, operation, options, debug=True)

    def domif_getlink(vm, device, options):
        """
        Get the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param options : some options like --config

        """

        return virsh.domif_getlink(vm,
                                   device,
                                   options,
                                   ignore_status=True,
                                   debug=True)

    def guest_cmd_check(cmd, session, pattern):
        """
        Check cmd output with pattern in session
        """
        try:
            cmd_status, output = session.cmd_status_output(cmd, timeout=10)
            logging.info("exit: %s, output: %s", cmd_status, output)
            return re.search(pattern, output)
        except (aexpect.ShellTimeoutError, aexpect.ShellStatusError) as e:
            logging.debug(e)
            return re.search(pattern, str(e))

    def guest_if_state(if_name, session):
        """
        Get the domain link state from the guest
        """
        # Get link state by ethtool
        cmd = "ethtool %s" % if_name
        pattern = "Link detected: ([a-zA-Z]+)"
        ret = guest_cmd_check(cmd, session, pattern)
        if ret:
            return ret.group(1) == "yes"
        else:
            return False

    def check_update_device(vm, if_name, session):
        """
        Change link state by the update-device command and check the results
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)

        # Get interface xml object
        iface = vmxml.get_devices(device_type="interface")[0]
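        # Drop any fixed PCI address so libvirt matches the device by MAC
        # rather than by a possibly stale address when updating.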
        if iface.address:
            del iface.address

        # Change link state to up
        iface.link_state = "up"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name,
                                  iface.xml,
                                  ignore_status=True,
                                  debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to up state")
            return False
        if not utils_misc.wait_for(lambda: guest_if_state(if_name, session),
                                   5):
            logging.error("Guest link should be up now")
            return False

        # Change link state to down
        iface.link_state = "down"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name,
                                  iface.xml,
                                  ignore_status=True,
                                  debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to down state")
            return False
        if utils_misc.wait_for(lambda: guest_if_state(if_name, session), 5):
            logging.error("Guest link should be down now")
            return False

        # Passed all test
        return True

    vm_name = []
    # vm_name list: first element is the original name from the config
    vm_name.append(params.get("main_vm", "avocado-vt-vm1"))
    vm = env.get_vm(vm_name[0])
    options = params.get("if_options", "--config")
    start_vm = params.get("start_vm", "no")
    domain = params.get("domain", "name")
    if_device = params.get("if_device", "net")
    if_name = params.get("if_name", "vnet0")
    if_operation = params.get("if_operation", "up")
    status_error = params.get("status_error", "no")
    mac_address = vm.get_virsh_mac_address(0)
    model_type = params.get("model_type", "virtio")
    check_link_state = "yes" == params.get("check_link_state", "no")
    check_link_by_update_device = "yes" == params.get("execute_update_device",
                                                      "no")
    device = "vnet0"
    username = params.get("username")
    password = params.get("password")
    post_action = params.get("post_action")
    save_file = os.path.join(data_dir.get_data_dir(), "vm.save")

    # Back up xml file.
    vm_xml_file = os.path.join(data_dir.get_data_dir(), "vm.xml")
    virsh.dumpxml(vm_name[0], extra="--inactive", to_file=vm_xml_file)

    # Update model type of the interface, delete the pci address to let libvirt
    # generate a new one suitable for the model
    if vm.is_alive():
        vm.destroy()
    iface_dict = {'model': model_type, 'del_addr': 'yes'}
    libvirt.modify_vm_iface(vm_name[0], "update_iface", iface_dict)
    # Vm status
    if start_vm == "yes" and vm.is_dead():
        vm.start()

    elif start_vm == "no" and vm.is_alive():
        vm.destroy()

    # vm_name list: second element for 'domain' in virsh command
    if domain == "ID":
        # Get ID for the running domain
        vm_name.append(vm.get_id())
    elif domain == "UUID":
        # Get UUID for the domain
        vm_name.append(vm.get_uuid())
    elif domain == "no_match_UUID":
        # Generate a random UUID
        vm_name.append(uuid.uuid1())
    elif domain == "no_match_name":
        # Generate a random string as domain name
        vm_name.append(utils_misc.generate_random_string(6))
    elif domain == " ":
        # Set domain name empty
        vm_name.append("''")
    else:
        # Set domain name
        vm_name.append(vm_name[0])

    try:
        # Test device net or mac address
        if if_device == "net" and vm.is_alive():
            device = if_name
            # Get all vm's interface device
            device = vm_xml.VMXML.get_net_dev(vm_name[0])[0]

        elif if_device == "mac":
            device = mac_address

        # Test no exist device
        if if_device == "no_exist_net":
            device = "vnet-1"
        elif if_device == "no_exist_mac":
            # Generate random mac address for negative test
            device = utils_net.VirtIface.complete_mac_address("01:02")
        elif if_device == " ":
            device = "''"

        # Setlink operation
        result = domif_setlink(vm_name[1], device, if_operation, options)
        status = result.exit_status
        logging.info("Setlink done")

        if post_action == "restart_libvirtd":
            utils_libvirtd.libvirtd_restart()
        elif post_action == "save_restore":
            vm.save_to_file(save_file)
            vm.restore_from_file(save_file)

        # Getlink operation
        get_result = domif_getlink(vm_name[1], device, options)
        getlink_output = get_result.stdout.strip()

        # Check the getlink command output
        if status_error == "no":
            if not re.search(if_operation, getlink_output):
                test.fail("Getlink result should "
                          "equal with setlink operation")

        logging.info("Getlink done")

        # Check guest xml about link status
        if post_action == "save_restore":
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
            iface = vmxml.get_devices(device_type="interface")[0]
            logging.debug("Guest current interface xml is %s" % iface)
            if iface.link_state != if_operation:
                test.fail("link state in guest xml should be %s" %
                          if_operation)

        # If --config or --persistent is given should restart the vm then test link status
        if any(options == option
               for option in ["--config", "--persistent"]) and vm.is_alive():
            vm.destroy()
            vm.start()
            logging.info("Restart VM")

        elif start_vm == "no":
            vm.start()

        error_msg = None
        if status_error == "no" and not post_action:
            # Log in over the serial console and check the link state
            session = vm.wait_for_serial_login(username=username,
                                               password=password)
            guest_if_name = utils_net.get_linux_ifname(session, mac_address)

            # Check link state in guest
            if check_link_state:
                if (if_operation == "up"
                        and not guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be up in guest"
                if (if_operation == "down"
                        and guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be down in guest"
                if error_msg:
                    test.fail(error_msg)

            # Test of setting link state by update_device command
            if check_link_by_update_device:
                if not check_update_device(vm, guest_if_name, session):
                    test.fail("Check update_device failed")

            # Set the link up make host connect with vm
            domif_setlink(vm_name[0], device, "up", "")
            if not utils_misc.wait_for(
                    lambda: guest_if_state(guest_if_name, session), 5):
                test.fail("Link state isn't up in guest")

            # Bring the link down inside the guest; ignore the command status
            cmd = 'ip link set %s down;' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state DOWN.*" % guest_if_name
            pattern_cmd = 'ip addr show dev %s' % guest_if_name
            guest_cmd_check(pattern_cmd, session, pattern)

            cmd = 'ip link set %s up;' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state UP.*" % guest_if_name
            if not utils_misc.wait_for(
                    lambda: guest_cmd_check(pattern_cmd, session, pattern),
                    timeout=20):
                test.fail("Could not bring up interface %s inside guest" %
                          guest_if_name)
        else:  # negative test
            # stop guest, so state is always consistent on next start
            vm.destroy()
        # Check status_error
        if status_error == "yes":
            if status:
                logging.info("Expected error (negative testing). Output: %s",
                             result.stderr.strip())

            else:
                test.fail("Unexpected return code %d "
                          "(negative testing)" % status)
        elif status_error != "no":
            test.error("Invalid value for status_error '%s' "
                       "(must be 'yes' or 'no')" % status_error)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(vm_name[0])
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)
        if os.path.exists(save_file):
            os.remove(save_file)
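
A note on the polling pattern used above: utils_misc.wait_for retries a boolean callable until it returns a truthy value or the timeout expires, which is why the link-state checks wrap guest_if_state in a lambda. A minimal standalone sketch of the same idea, assuming a hypothetical is_link_up() probe (the names here are illustrative, not from the test suite):

import time

def wait_for(func, timeout, first=0.0, step=1.0):
    """Poll func() until it returns a truthy value or the timeout expires."""
    time.sleep(first)
    end_time = time.time() + timeout
    while time.time() < end_time:
        output = func()
        if output:
            return output
        time.sleep(step)
    return None

# Usage sketch: fail if the (hypothetical) probe never reports link-up.
# if not wait_for(lambda: is_link_up("eth0"), timeout=5):
#     raise AssertionError("Link state isn't up in guest")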
コード例 #40
0
ファイル: netperf.py プロジェクト: aliang123/tp-qemu
def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
                  port, params, server_cyg):
    """ Launch netperf clients """

    netperf_version = params.get("netperf_version", "2.6.0")
    client_path = "/tmp/netperf-%s/src/netperf" % netperf_version
    server_path = "/tmp/netperf-%s/src/netserver" % netperf_version
    get_status_flag = params.get("get_status_in_guest", "no") == "yes"
    global _netserver_started
    # Start netserver
    if _netserver_started:
        logging.debug("Netserver already started.")
    else:
        error.context("Start Netserver on guest", logging.info)
        if params.get("os_type") == "windows":
            timeout = float(params.get("timeout", "240"))
            cdrom_drv = utils_misc.get_winutils_vol(server_ctl)
            if params.get("use_cygwin") == "yes":
                netserv_start_cmd = params.get("netserv_start_cmd")
                netperf_src = params.get("netperf_src") % cdrom_drv
                cygwin_root = params.get("cygwin_root")
                netserver_path = params.get("netserver_path")
                netperf_install_cmd = params.get("netperf_install_cmd")
                start_session = server_cyg
                logging.info("Start netserver with cygwin, cmd is: %s" %
                             netserv_start_cmd)
                if "netserver" not in server_ctl.cmd_output("tasklist"):
                    netperf_pack = "netperf-%s" % params.get("netperf_version")
                    s_check_cmd = "dir %s" % netserver_path
                    p_check_cmd = "dir %s" % cygwin_root
                    if not ("netserver.exe" in server_ctl.cmd(s_check_cmd)
                            and netperf_pack in server_ctl.cmd(p_check_cmd)):
                        error.context(
                            "Install netserver in Windows guest cygwin",
                            logging.info)
                        cmd = "xcopy %s %s /S /I /Y" % (netperf_src,
                                                        cygwin_root)
                        server_ctl.cmd(cmd)
                        server_cyg.cmd_output(netperf_install_cmd,
                                              timeout=timeout)
                        if "netserver.exe" not in server_ctl.cmd(s_check_cmd):
                            err_msg = "Install netserver cygwin failed"
                            raise error.TestNAError(err_msg)
                        logging.info(
                            "Installed netserver in cygwin successfully")
            else:
                start_session = server_ctl
                netserv_start_cmd = params.get("netserv_start_cmd") % cdrom_drv
                logging.info("Start netserver without cygwin, cmd is: %s" %
                             netserv_start_cmd)

            error.context("Start netserver on windows guest", logging.info)
            start_netserver_win(start_session, netserv_start_cmd)

        else:
            logging.info("Netserver start cmd is '%s'" % server_path)
            ssh_cmd(server_ctl, "pidof netserver || %s" % server_path)
            ncpu = ssh_cmd(server_ctl,
                           "cat /proc/cpuinfo |grep processor |wc -l")
            ncpu = re.findall(r"\d+", ncpu)[-1]

        logging.info("Netserver start successfully")

    def count_interrupt(name):
        """
        Get a list of interrupt counts, one per queue

        @param name: the name of interrupt, such as "virtio0-input"
        """
        sum = 0
        intr = []
        stat = ssh_cmd(server_ctl, "grep %s /proc/interrupts" % name)
        for i in stat.strip().split("\n"):
            for cpu in range(int(ncpu)):
                sum += int(i.split()[cpu + 1])
            intr.append(sum)
            sum = 0
        return intr

    def get_state():
        for i in ssh_cmd(server_ctl, "ifconfig").split("\n\n"):
            if server in i:
                ifname = re.findall(r"(\w+\d+)[:\s]", i)[0]

        path = "find /sys/devices|grep net/%s/statistics" % ifname
        cmd = "%s/rx_packets|xargs cat;%s/tx_packets|xargs cat;" \
            "%s/rx_bytes|xargs cat;%s/tx_bytes|xargs cat" % (path,
                                                             path, path, path)
        output = ssh_cmd(server_ctl, cmd).split()[-4:]

        nrx = int(output[0])
        ntx = int(output[1])
        nrxb = int(output[2])
        ntxb = int(output[3])

        nre = int(
            ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1").split()[12])
        state_list = [
            'rx_pkts', nrx, 'tx_pkts', ntx, 'rx_byts', nrxb, 'tx_byts', ntxb,
            're_pkts', nre
        ]
        try:
            nrx_intr = count_interrupt("virtio.-input")
            ntx_intr = count_interrupt("virtio.-output")
            sum = 0
            for i in range(len(nrx_intr)):
                state_list.append('rx_intr_%s' % i)
                state_list.append(nrx_intr[i])
                sum += nrx_intr[i]
            state_list.append('rx_intr_sum')
            state_list.append(sum)

            sum = 0
            for i in range(len(ntx_intr)):
                state_list.append('tx_intr_%s' % i)
                state_list.append(ntx_intr[i])
                sum += ntx_intr[i]
            state_list.append('tx_intr_sum')
            state_list.append(sum)

        except IndexError:
            ninit = count_interrupt("virtio.")
            state_list.append('intr')
            state_list.append(ninit)

        exits = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/exits"))
        state_list.append('exits')
        state_list.append(exits)

        return state_list

    def netperf_thread(i, numa_enable, client_s, timeout):
        cmd = ""
        fname = "/tmp/netperf.%s.nf" % pid
        if numa_enable:
            n = abs(int(params.get("numa_node"))) - 1
            cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
        cmd += "/tmp/netperf_agent.py %d %s -D 1 -H %s -l %s %s" % (
            i, client_path, server, int(l) * 1.5, nf_args)
        cmd += " >> %s" % fname
        logging.info("Start netperf thread by cmd '%s'" % cmd)
        ssh_cmd(client_s, cmd)

    def all_clients_up():
        try:
            content = ssh_cmd(clients[-1], "cat %s" % fname)
        except Exception:
            return False
        return int(sessions) == len(re.findall("MIGRATE", content))

    def parse_demo_result(fname, sessions):
        """
        Process the demo result, remove the noise from the head,
        and compute the final throughput.

        :param fname: result file name
        :param sessions: sessions' number
        """
        fd = open(fname)
        lines = fd.readlines()
        fd.close()

        for i in range(1, len(lines) + 1):
            if "AF_INET" in lines[-i]:
                break
        nresult = i - 1
        if nresult < int(sessions):
            raise error.TestError("Expected at least %s parallel results, "
                                  "got %s" % (sessions, nresult))

        niteration = nresult // sessions
        result = 0.0
        for this in lines[-sessions * niteration:]:
            if "Interim" in this:
                result += float(re.findall(r"Interim result: *(\S+)", this)[0])
        result = result / niteration
        logging.debug("niteration: %s" % niteration)
        return result

    error.context("Start netperf client threads", logging.info)
    pid = str(os.getpid())
    fname = "/tmp/netperf.%s.nf" % pid
    ssh_cmd(clients[-1], "rm -f %s" % fname)
    numa_enable = params.get("netperf_with_numa", "yes") == "yes"
    timeout_netperf_start = int(l) * 0.5
    client_thread = threading.Thread(target=netperf_thread,
                                     kwargs={
                                         "i": int(sessions),
                                         "numa_enable": numa_enable,
                                         "client_s": clients[0],
                                         "timeout": timeout_netperf_start
                                     })
    client_thread.start()

    ret = {}
    ret['pid'] = pid

    if utils_misc.wait_for(all_clients_up, timeout_netperf_start, 0.0, 0.2,
                           "Wait until all netperf clients start to work"):
        logging.debug("All netperf clients start to work.")
    else:
        raise error.TestNAError("Error, not all netperf clients at work")

    # real & effective test starts
    if get_status_flag:
        start_state = get_state()
    ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1))
    finished_result = ssh_cmd(clients[-1], "cat %s" % fname)

    # stop netperf clients
    kill_cmd = "killall netperf"
    if params.get("os_type") == "windows":
        kill_cmd = "taskkill /F /IM netperf*"
    ssh_cmd(clients[-1], kill_cmd, ignore_status=True)

    # real & effective test ends
    if get_status_flag:
        end_state = get_state()
        if len(start_state) != len(end_state):
            msg = "Initial state not match end state:\n"
            msg += "  start state: %s\n" % start_state
            msg += "  end state: %s\n" % end_state
            logging.warning(msg)
        else:
            for i in range(len(end_state) // 2):
                ret[end_state[i * 2]] = (end_state[i * 2 + 1] -
                                         start_state[i * 2 + 1])

    client_thread.join()

    error.context("Testing Results Treatment and Report", logging.info)
    f = open(fname, "w")
    f.write(finished_result)
    f.close()
    ret['thu'] = parse_demo_result(fname, int(sessions))
    return ret
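
parse_demo_result above walks the netperf demo output backwards to the last "AF_INET" banner and then averages the "Interim result:" samples over the trailing complete iterations, summing across sessions within each iteration. A condensed, self-contained sketch of that averaging step (the sample lines below are invented for illustration, not real netperf output):

import re

def average_interim(lines, sessions):
    """Average summed 'Interim result:' values over complete iterations."""
    # Count result lines after the last "AF_INET" banner.
    for i in range(1, len(lines) + 1):
        if "AF_INET" in lines[-i]:
            break
    nresult = i - 1
    niteration = nresult // sessions        # complete iterations only
    if niteration == 0:
        raise ValueError("fewer results than sessions")
    total = 0.0
    for line in lines[-sessions * niteration:]:
        match = re.search(r"Interim result: *(\S+)", line)
        if match:
            total += float(match.group(1))
    return total / niteration

sample = [
    "... AF_INET ...",
    "Interim result: 940.10 10^6bits/s over 1.0 seconds",
    "Interim result: 938.55 10^6bits/s over 1.0 seconds",
]
print(average_interim(sample, sessions=2))  # ~1878.65, summed across 2 sessions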
コード例 #41
0
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. This code needs
        # to be removed as soon as the feature is enabled by default, which
        # is tracked by bz1799015.
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu',
                     'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "ceph":
            ceph_mon_ip = params.get("ceph_mon_ip",
                                     "libvirtauthceph.usersys.redhat.com")
            ceph_host_port = params.get("ceph_host_port", "6789")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "avocado-vt-pool/inc_bkup.qcow2")
            ceph_pool_name = ceph_disk_name.split('/')[0]
            ceph_file_name = ceph_disk_name.split('/')[1]
            ceph_client_name = params.get("ceph_client_name", "client.admin")
            ceph_client_key = params.get(
                "ceph_client_key", "AQDkY/xd2pqyLhAAPQ2Yrla/nGe1PazR4/n+IQ==")
            ceph_auth_user = params.get("ceph_auth_user", "admin")
            ceph_auth_key = params.get(
                "ceph_auth_key", "AQDkY/xd2pqyLhAAPQ2Yrla/nGe1PazR4/n+IQ==")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Start with a blank value so cleanup can tell whether a ceph
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            ceph.rbd_image_rm(ceph_mon_ip, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_ip,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["file"] = target_file_path
                        logging.debug("target_params: %s", target_params)
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            utils_backup.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs,
                                            dd_seek, dd_count)

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait until the backup job has actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_ip, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
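
The comparison step above normalizes both images before diffing: the source disk is converted to a standalone qcow2, and each backup has its backing file cleared with "qemu-img rebase -u" (a header-only rewrite, no data is copied). A minimal sketch of the same flow using plain subprocess and "qemu-img compare" in place of utils_backup.cmp_backup_data (paths are placeholders):

import subprocess

def backup_matches_source(source_disk, backup_path,
                          work_copy="/tmp/original_data.qcow2"):
    """Return True if backup_path holds the same data as source_disk."""
    subprocess.run(["qemu-img", "convert", "-f", "qcow2", "-O", "qcow2",
                    source_disk, work_copy], check=True)
    # '-u' only rewrites the image header, so no data is copied.
    subprocess.run(["qemu-img", "rebase", "-u", "-f", "qcow2",
                    "-b", "", backup_path], check=True)
    # 'qemu-img compare' exits 0 when the visible contents are identical.
    result = subprocess.run(["qemu-img", "compare", work_copy, backup_path])
    return result.returncode == 0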
コード例 #42
0
def run(test, params, env):
    """
    QEMU flow caches stress test

    1) Make sure nf_conntrack is disabled in host and guest.
       If nf_conntrack is enabled in host, skip this case.
    2) Boot guest with vhost=on/off.
    3) Enable multi queues support in guest (optional).
    4) After installation of netperf, run netserver in host.
    5) Run netperf TCP_CRR protocol test in guest.
    6) Transfer file between guest and host.
    7) Check the md5 of copied file.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    msg = "Make sure nf_conntrack is disabled in host and guest."
    error.context(msg, logging.info)
    if "nf_conntrack" in utils.system_output("lsmod"):
        err = "nf_conntrack load in host, skip this case"
        raise error.TestNAError(err)

    params["start_vm"] = "yes"
    error.context("Boot up guest", logging.info)
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    if "nf_conntrack" in session.cmd_output("lsmod"):
        msg = "Unload nf_conntrack module in guest."
        error.context(msg, logging.info)
        black_str = "#disable nf_conntrack\nblacklist nf_conntrack\n" \
                    "blacklist nf_conntrack_ipv6\nblacklist xt_conntrack\n" \
                    "blacklist nf_conntrack_ftp\nblacklist xt_state\n" \
                    "blacklist iptable_nat\nblacklist ipt_REDIRECT\n" \
                    "blacklist nf_nat\nblacklist nf_conntrack_ipv4"
        cmd = "echo -e '%s' >> /etc/modprobe.d/blacklist.conf" % black_str
        session.cmd(cmd)
        session = vm.reboot(session, timeout=timeout)
        if "nf_conntrack" in session.cmd_output("lsmod"):
            err = "Fail to unload nf_conntrack module in guest."
            error.TestError(err)

    netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"),
                                       params["netperf_link"])
    md5sum = params.get("pkg_md5sum")
    win_netperf_link = params.get("win_netperf_link")
    if win_netperf_link:
        win_netperf_link = utils_misc.get_path(
            data_dir.get_deps_dir("netperf"), win_netperf_link)
    win_netperf_md5sum = params.get("win_netperf_md5sum")
    server_path = params.get("server_path", "/var/tmp/")
    client_path = params.get("client_path", "/var/tmp/")
    win_netperf_path = params.get("win_netperf_path", "c:\\")
    client_num = params.get("netperf_client_num", 520)
    netperf_timeout = params.get("netperf_timeout", 600)
    netperf_client_ip = vm.get_address()
    host_ip = utils_net.get_host_ip_address(params)
    netperf_server_ip = params.get("netperf_server_ip", host_ip)

    username = params.get("username", "root")
    password = params.get("password", "123456")
    passwd = params.get("hostpasswd", "123456")
    client = params.get("shell_client", "ssh")
    port = params.get("shell_port", "22")

    if int(params.get("queues", 1)) > 1 and params.get("os_type") == "linux":
        error.context("Enable multi queues support in guest.", logging.info)
        guest_mac = vm.get_mac_address()
        ifname = utils_net.get_linux_ifname(session, guest_mac)
        cmd = "ethtool -L %s combined  %s" % (ifname, params.get("queues"))
        status, out = session.cmd_status_output(cmd)
        msg = "Fail to enable multi queues support in guest."
        msg += "Command %s fail output: %s" % (cmd, out)
        error.TestError(msg)

    if params.get("os_type") == "linux":
        session.cmd("iptables -F", ignore_all_errors=True)
        g_client_link = netperf_link
        g_client_path = client_path
        g_md5sum = md5sum
    elif params.get("os_type") == "windows":
        g_client_link = win_netperf_link
        g_client_path = win_netperf_path
        g_md5sum = win_netperf_md5sum

    error.context("Setup netperf in guest and host", logging.info)
    netperf_client = utils_netperf.NetperfClient(
        netperf_client_ip,
        g_client_path,
        g_md5sum,
        g_client_link,
        username=username,
        password=password,
        compile_option="--enable-burst")

    netperf_server = utils_netperf.NetperfServer(
        netperf_server_ip,
        server_path,
        md5sum,
        netperf_link,
        client,
        port,
        password=passwd,
        compile_option="--enable-burst")
    try:
        error.base_context("Run netperf test between host and guest.")
        error.context("Start netserver in host.", logging.info)
        netperf_server.start()

        error.context("Start Netperf in guest for %ss." % netperf_timeout,
                      logging.info)
        test_option = "-t TCP_CRR -l %s -- -b 10 -D" % netperf_timeout
        netperf_client.bg_start(netperf_server_ip, test_option, client_num)

        utils_misc.wait_for(lambda: not netperf_client.is_netperf_running(),
                            timeout=netperf_timeout,
                            first=590,
                            step=2)

        utils_test.run_file_transfer(test, params, env)
    finally:
        netperf_server.stop()
        netperf_client.package.env_cleanup(True)
        if session:
            session.close()
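
The guest-side setup above persists a module blacklist and reboots to verify it; the decisive check in both host and guest is simply whether "nf_conntrack" shows up in lsmod output. A small local sketch of that probe (the helper name is hypothetical, not part of the test suite):

import subprocess

def conntrack_loaded():
    """Return True if the nf_conntrack module is currently loaded."""
    lsmod = subprocess.run(["lsmod"], capture_output=True, text=True)
    return "nf_conntrack" in lsmod.stdout

if conntrack_loaded():
    print("nf_conntrack is loaded; this case would be skipped")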
コード例 #43
0
def run(test, params, env):
    """
    Verify SLOF info with hugepage.

    Step:
     1. Assign definite size hugepage and mount it in host.
     2. Boot a guest by following ways:
         a. hugepage as backing file
         b. hugepage not as backing file
        then Check if any error info in output of SLOF.
     3. Get the size of memory inside guest.
     4. Hot plug pc-dimm by QMP.
     5. Get the size of memory after hot plug pc-dimm inside guest,
        then check the different value of memory.
     6. Reboot guest.
     7. Guest could login successfully.
     8. Guest could ping external host ip.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _wait_for_login(cur_pos=0):
        """Wait for login guest."""
        content, next_pos = slof.wait_for_loaded(vm, test, cur_pos)
        error_context.context("Check the output of SLOF.", logging.info)
        slof.check_error(test, content)

        error_context.context("Try to log into guest '%s'." % vm.name,
                              logging.info)
        timeout = float(params.get("login_timeout", 240))
        session = vm.wait_for_login(timeout=timeout)
        logging.info("log into guest '%s' successfully.", vm.name)
        return session, next_pos

    _setup_hugepage(params)

    params['start_vm'] = 'yes'
    env_process.preprocess_vm(test, params, env, params["main_vm"])

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session, next_pos = _wait_for_login()

    error_context.context('Get guest free memory size before hotplug pc-dimm.',
                          logging.info)
    orig_mem = int(session.cmd_output(cmd=params['free_mem_cmd']))
    logging.debug('Guest free memory size is %d bytes', orig_mem)

    error_context.context('Hotplug pc-dimm for guest.', logging.info)
    htp_mem = MemoryHotplugTest(test, params, env)
    htp_mem.hotplug_memory(vm, params['plug_mem_name'])

    plug_timeout = float(params.get('plug_timeout', 5))
    if not utils_misc.wait_for(
            lambda: _check_mem_increase(session, params, orig_mem),
            plug_timeout):
        test.fail("Guest memory size is not increased %s in %s sec." %
                  (params['size_plug'], params.get('plug_timeout', 5)))

    error_context.context('Reboot guest', logging.info)
    session.close()
    vm.reboot()

    session, _ = _wait_for_login(next_pos)
    error_context.context("Try to ping external host.", logging.info)
    extra_host_ip = utils_net.get_host_ip_address(params)
    session.cmd('ping %s -c 5' % extra_host_ip)
    logging.info("Ping host(%s) successfully.", extra_host_ip)

    session.close()
    vm.destroy(gracefully=True)
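
_check_mem_increase is referenced above but not shown in this excerpt; the test simply polls it until the guest's free memory has grown by the hot-plugged amount. A plausible sketch of such a predicate, assuming params['size_plug'] holds the plugged size in bytes and free_mem_cmd prints a byte count (both are assumptions about this test's parameters):

def _check_mem_increase(session, params, orig_mem):
    """Return True once guest free memory grew by the plugged size."""
    new_mem = int(session.cmd_output(cmd=params['free_mem_cmd']))
    increase = int(params['size_plug'])  # assumed: plugged size in bytes
    return new_mem - orig_mem >= increase

# Polled via:
# utils_misc.wait_for(lambda: _check_mem_increase(session, params, orig_mem),
#                     plug_timeout)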
コード例 #44
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach new disk/detach disk.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh attach/detach-disk operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    def check_info_in_audit_log_file(test_cmd, device_source):
        """
        Check if information can be found in audit.log.

        :param test_cmd: test command
        :param device_source: device source
        """
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        return process.run(cmd, ignore_status=True,
                           shell=True).exit_status == 0

    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: device type, e.g. 'disk' or 'cdrom'.
        :param os_type: VM's operating system type.
        :param target_name: device target name.
        :param old_parts: partition list collected before the attach.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        Check if the current libvirt version supports the shareable option.

        :param at_with_shareable: True or False. Whether to attach the disk
                                  with the shareable option.
        :param test_twice: True or False. Whether to perform operations twice.
        :return: True, or cancel the test.
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel(
                    "Current libvirt version doesn't support shareable feature"
                )

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0:'
                         'QEMU image locking, which should prevent multiple '
                         'runs of QEMU or qemu-img when a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expect result is failure as opposed with succeed')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_systemlink_twice = "yes" == params.get(
        "at_dt_disk_twice_with_systemlink", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logical_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get(
        "detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    # Get additional lvm item names.
    additional_lv_names = params.get("at_dt_disk_additional_lvs", "").split()
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    # Define one empty list to locate those lvm.
    total_lvm_names = []
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    machine_type = params.get('machine_type')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_data_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logical_dev:
            if lv_utils.vg_check(vg_name):
                lv_utils.vg_remove(vg_name)
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
            total_lvm_names.append(device_source)
            if test_systemlink_twice:
                for lvm__item_name in additional_lv_names:
                    additional_device_source = libvirt.create_local_disk(
                        "lvm",
                        size="10M",
                        vgname=vg_name,
                        lvname=lvm__item_name)
                    logging.debug("New created volume: %s",
                                  additional_device_source)
                    total_lvm_names.append(additional_device_source)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    if machine_type == 'q35':
        # Add more pci controllers to avoid error: No more available PCI slots
        if test_twice and params.get("add_more_pci_controllers",
                                     "yes") == "yes":
            vm_dump_xml.remove_all_device_by_type('controller')
            machine_list = vm_dump_xml.os.machine.split("-")
            vm_dump_xml.set_os_attrs(
                **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
            q35_pcie_dict0 = {
                'controller_model': 'pcie-root',
                'controller_type': 'pci',
                'controller_index': 0
            }
            q35_pcie_dict1 = {
                'controller_model': 'pcie-root-port',
                'controller_type': 'pci'
            }
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict0))
            # Add enough controllers to allow attaching the disk multiple times
            for i in list(range(1, 15)):
                q35_pcie_dict1.update({'controller_index': "%d" % i})
                vm_dump_xml.add_device(
                    libvirt.create_controller_xml(q35_pcie_dict1))
            vm_dump_xml.sync()

    if params.get("reset_pci_controllers_nums", "no") == "yes" \
            and 'attach-disk' in test_cmd:
        libvirt_pcicontr.reset_pci_num(vm_name, 15)

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the
        # shareable option must be set if the disk is attached multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name,
                                     device_source,
                                     device_target,
                                     s_at_options,
                                     debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug(
                "Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the print-xml option, detach-disk only prints the device XML;
        # it does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(
                4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s" %
                   (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status
    if test_systemlink_twice:
        # Detach lvm previously attached.
        result = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True)
        libvirt.check_exit_status(result)
        # Deactivate lv01, lv02, and lv03 to remove their device links
        for lvm_item in total_lvm_names:
            remove_systemlink_cmd = ('lvchange -a n %s' % lvm_item)
            if process.run(remove_systemlink_cmd, shell=True).exit_status:
                logging.error("Remove systemlink failed")
        # Reactivate lv01, lv02, and lv03, shifting each by one position
        for index in range(0, len(total_lvm_names)):
            add_systemlink_cmd = (
                'lvchange -a y %s' %
                total_lvm_names[(index + 1) % len(total_lvm_names)])
            if process.run(add_systemlink_cmd, shell=True).exit_status:
                logging.error("Add systemlink failed")
        # Attach lvm lv01 again.
        result = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True)
        libvirt.check_exit_status(result)
        # Detach lvm 01 again.
        result = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True)
        libvirt.check_exit_status(result)

    # Resume the guest after the command. On newer libvirt this was a bug
    # that has since been fixed: the xml change is only applied after the
    # guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        result = utils_misc.wait_for(
            lambda: check_info_in_audit_log_file(test_cmd, device_source),
            timeout=20)
        if not result:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need to wait a while for the xml to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_data_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run command twice to make sure the cdrom tray opens first #BZ892289
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                if test_systemlink_twice:
                    for lv_item_name in additional_lv_names:
                        libvirt.delete_local_disk("lvm",
                                                  vgname=vg_name,
                                                  lvname=lv_item_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if test_systemlink_twice:
            return
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file" " after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after" " attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after" " attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file " "after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure " "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
Code example #45
0
def run(test, params, env):
    """
    Test virsh domblkerror in 2 types error
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    rhel = distro.detect().name == 'rhel'
    nfs_service_package = params.get("nfs_service_package",
                                     "nfs-kernel-server")
    nfs_service = None
    selinux_bool = None
    session = None
    selinux_bak = ""

    vm = env.get_vm(vm_name)
    if error_type == "unspecified error":
        selinux_local = params.get("setup_selinux_local", "yes") == "yes"
        if not ubuntu and not rhel:
            nfs_service_package = "nfs"
        elif rhel:
            nfs_service_package = "nfs-server"
        if not rhel and not utils_package.package_install(nfs_service_package):
            test.cancel("NFS package not available in host to test")
        # backup /etc/exports
        shutil.copyfile(export_file, "%s.bak" % export_file)
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True,
                    verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk backed by NFS;
            # stopping the NFS service makes the guest pause with an
            # "unspecified error"
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir),
                        shell=True,
                        verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)
            if not ubuntu and selinux_local:
                params['set_sebool_local'] = "yes"
                params['local_boolean_varible'] = "virt_use_nfs"
                params['local_boolean_value'] = "on"
                selinux_bool = utils_misc.SELinuxBoolean(params)
                selinux_bool.setup()

        elif error_type == "no space":
            # Steps to generate a "no space" block error:
            # 1. Prepare an iscsi disk and build a fs pool with it
            # 2. Create a vol with larger capacity and 0 allocation
            # 3. Attach this disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    cause the guest to pause

            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name,
                               "fs",
                               pool_target,
                               img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.driver = {
            'name': "qemu",
            'type': "raw",
            'cache': "none",
            'error_policy': "stop"
        }
        img_disk.target = {'dev': target_dev, 'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(vm_name, img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # install dependent packages
            pkg_list = ["parted", "e2fsprogs"]
            for pkg in pkg_list:
                if not utils_package.package_install(pkg, session):
                    test.error("Failed to install dependent package %s" % pkg)

            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext4 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext4 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may cause the guest to pause before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected Fail %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # Stop the nfs service to trigger the error after creating the large image
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation to the mnt disk
            session = vm.wait_for_login()
            session.cmd("echo 'one more write to big file' > %s/big_file" %
                        mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest does not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    test.fail("Failed to get expect result, get %s" %
                              output.stdout.strip())
            else:
                test.fail("Fail to get domblkerror info:%s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if session:
            session.close()
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
            if selinux_bool:
                selinux_bool.cleanup(keep_authorized_keys=True)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
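
Nearly every snippet in this collection leans on utils_misc.wait_for to poll
a condition. A minimal re-implementation sketch, matching only the call shape
used by these snippets (func, timeout, optional first delay and step):

import time


def wait_for(func, timeout, first=0.0, step=1.0):
    """Poll func until it returns a truthy value or timeout expires.

    Returns the truthy result, or None on timeout.
    """
    time.sleep(first)
    end_time = time.time() + float(timeout)
    while time.time() < end_time:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None
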
Code example #46
0
File: ethtool.py Project: wkf31156/tp-qemu
                utils.system(dd_cmd)
            except error.CmdError as e:
                return failure

        # only capture the new tcp port after offload setup
        original_tcp_ports = re.findall(
            "tcp.*:(\d+).*%s" % guest_ip,
            utils.system_output("/bin/netstat -nap"))

        for i in original_tcp_ports:
            tcpdump_cmd += " and not port %s" % i

        txt = "Listening traffic using command: %s" % tcpdump_cmd
        error.context(txt, logging.info)
        sess.sendline(tcpdump_cmd)
        if not utils_misc.wait_for(
                lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
            return (False, "Tcpdump process wasn't launched")

        txt = "Transferring file %s from %s" % (filename, src)
        error.context(txt, logging.info)
        try:
            copy_files_func(filename, filename)
        except remote.SCPError as e:
            return (False, "File transfer failed (%s)" % e)

        session.cmd("killall tcpdump")
        try:
            tcpdump_string = sess.read_up_to_prompt(timeout=60)
        except aexpect.ExpectError:
            return (False, "Failed to read tcpdump's output")
Code example #47
0
def run(test, params, env):
    """
    Basic pyvmomi test

    1) create a snapshot
    2) power on the VM, write some fixed length data to
       the second disk.
    3) power off the VM, query the changes and compare
       the length of changed area.
    """
    def safe_power_off(conn):
        """
        Power off safely

        If the VM is poweroff state, the power_off call
        will fail, this function checks the state before
        power off operation.
        """
        power_state = conn.get_vm_summary()['power_state']
        if power_state != 'poweredOff':
            conn.power_off()

    vm_name = params.get("main_vm")
    if not vm_name:
        test.error('No VM specified')

    # vsphere server's host name or IP address
    vsphere_host = params.get("vsphere_host")
    # vsphere user
    vsphere_user = params.get("vsphere_user")
    # vsphere password
    vsphere_pwd = params.get("vsphere_pwd")

    # vm boots up timeout value, default is 5 mins
    vm_bootup_timeout = params.get("vm_bootup_timeout ", 300)
    # vm user
    vm_user = params.get("vm_user", 'root')
    # vm password
    vm_pwd = params.get("vm_pwd")

    # vm remote login client arguments setting
    vm_client = params.get("vm_client", 'ssh')
    vm_port = params.get("vm_port", 22)
    vm_prompt = params.get("vm_prompt", r"[\#\$\[\]%]")

    try:
        connect_args = {
            'host': vsphere_host,
            'user': vsphere_user,
            'pwd': vsphere_pwd
        }
        conn = VSphere(**connect_args)
        conn.target_vm = vm_name

        # Poweroff the guest first if it is Up.
        safe_power_off(conn)
        # Remove all snapshots first
        conn.remove_all_snapshots()

        # Get disk counts of the VM
        if len(conn.get_hardware_devices(
                dev_type=vim.vm.device.VirtualDisk)) < 2:
            test.error('The main_vm must have at least two disks')

        # Create a snapshot
        conn.create_snapshot()
        # Poweron the guest
        conn.power_on()
        # Wait for VM totally boots up to get the IP address
        vm_ipaddr = wait_for(lambda: conn.get_vm_summary()['ip_address'],
                             vm_bootup_timeout)
        if not vm_ipaddr:
            test.fail('Get VM IP address failed')

        LOG.info("VM's (%s) IP address is %s", vm_name, vm_ipaddr)

        conn_kwargs = {
            'client': vm_client,
            'host': vm_ipaddr,
            'port': vm_port,
            'username': vm_user,
            'password': vm_pwd,
            'prompt': vm_prompt
        }

        vm_session = wait_for_login(**conn_kwargs)

        # Write some fixed length of data
        cmd = 'dd if=/dev/urandom of=/dev/sdb bs=6000 count=10000 seek=1000'
        res = vm_session.cmd_output(cmd)
        LOG.debug('Session outputs:\n%s', res)

        # Power off at once
        conn.power_off()

        # Query the changed area
        disk_change_info = conn.query_changed_disk_areas(
            disk_label='Hard disk 2')
        if not disk_change_info.changedArea:
            test.fail('Not found any changes')

        total = 0
        for change in disk_change_info.changedArea:
            total += change.length

        LOG.info('total change length is %s', total)
        if not 60000000 <= total <= 61000000:
            test.fail('Unexpected change size')

    finally:
        safe_power_off(conn)
        # Remove all snapshots
        conn.remove_all_snapshots()
        conn.close()
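
The tolerance window in the check above follows directly from the dd
invocation: bs=6000 count=10000 writes exactly 60,000,000 bytes, and the
changed-area query reports whole blocks, so the summed changedArea lengths
can come out slightly larger, hence the tolerance. A small helper capturing
that arithmetic:

def change_size_ok(changed_areas, bs=6000, count=10000, slack=1000000):
    """True if the summed changed-area lengths match the bytes dd wrote,
    allowing for change-tracking granularity rounding up."""
    written = bs * count  # 60,000,000 bytes
    total = sum(area.length for area in changed_areas)
    return written <= total <= written + slack
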
Code example #48
0
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug, find_device(vm, params)
                                   ), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be detached
        :param device_name: Device name to be detached
        :param unplug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(xml, params['controller_model'])
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        process.system("systemctl restart libvirtd")
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")

    if guest_src_url:

        def _download():
            download_cmd = ("wget %s -O %s" % (guest_src_url, target_path))
            if process.system(download_cmd, shell=True):
                test.error("Failed to download file")
            # Return True so wait_for stops after one successful download.
            return True

        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            if utils_package.package_install("wget"):
                utils_misc.wait_for(_download, timeout=360)
            else:
                test.error("Fail to install wget")
        params["blk_source_name"] = target_path

    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': 'pcie-to-pci-bridge'
            }
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'], 'address',
                                          args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
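
get_free_slot is referenced above but not shown. A purely hypothetical sketch
of what such a helper could do (an assumption for illustration, not the
framework's actual implementation): scan the XML for <address> elements
already on the pcie-to-pci-bridge bus and return the first unused slot,
hex-formatted the same way the bridge index is ('%0#4x' renders 2 as '0x02'):

def get_free_slot(bus_index, vmxml, max_slots=31):
    """Hypothetical: first PCI slot on bus_index unused by any <address>."""
    used = {addr.get('slot')
            for addr in vmxml.xmltreefile.findall(".//address")
            if addr.get('bus') == bus_index}
    for slot in range(1, max_slots + 1):
        hex_slot = '%0#4x' % slot
        if hex_slot not in used:
            return hex_slot
    return None
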
Code example #49
0
        logging.warn("qemu does not support qmp. Human monitor will be used.")
        monitor = vm.monitor
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    cdrom = params.get("cdrom_cd1")
    cdrom = utils_misc.get_path(data_dir.get_data_dir(), cdrom)
    device_name = vm.get_block({"file": cdrom})
    if device_name is None:
        msg = "Unable to detect qemu block device for cdrom %s" % cdrom
        raise error.TestError(msg)
    orig_img_name = params.get("orig_img_name")
    change_insert_cmd = "change device=%s,target=%s" % (device_name,
                                                        orig_img_name)
    monitor.send_args_cmd(change_insert_cmd)
    logging.info("Wait until device is ready")
    exists = utils_misc.wait_for(lambda:
                                 (orig_img_name in str(monitor.info("block"))),
                                 timeout=10,
                                 first=3)
    if not exists:
        msg = "Fail to insert device %s to guest" % orig_img_name
        raise error.TestFail(msg)

    if check_block_locked(device_name):
        raise error.TestFail("Unused device is locked.")

    if params.get("os_type") != "windows":
        error.context("mount cdrom to make status to locked", logging.info)
        cdroms = utils_misc.wait_for(
            lambda: (utils_test.get_readable_cdroms(params, session)),
            timeout=10)
        if not cdroms:
            raise error.TestFail("Not readable cdrom found in your guest")
Code example #50
0
def run(test, params, env):
    """
    Test virtio-fs by sharing the data between host and guest.
    Steps:
        1. Create shared directories on the host.
        2. Run virtiofsd daemons on the host.
        3. Boot a guest on the host with virtiofs options.
        4. Log into guest then mount the virtiofs targets.
        5. Generate files or run stress on the mount points inside guest.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_viofs_exe(session):
        """
        Get viofs.exe from the virtio-win iso, e.g. E:\\viofs\\2k19\\amd64
        """
        logging.info("Get virtiofs exe full path.")
        media_type = params["virtio_win_media_type"]
        try:
            get_drive_letter = getattr(virtio_win,
                                       "drive_letter_%s" % media_type)
            get_product_dirname = getattr(virtio_win,
                                          "product_dirname_%s" % media_type)
            get_arch_dirname = getattr(virtio_win,
                                       "arch_dirname_%s" % media_type)
        except AttributeError:
            test.error("Not supported virtio win media type '%s'", media_type)
        viowin_ltr = get_drive_letter(session)
        if not viowin_ltr:
            test.error("Could not find virtio-win drive in guest")
        guest_name = get_product_dirname(session)
        if not guest_name:
            test.error("Could not get product dirname of the vm")
        guest_arch = get_arch_dirname(session)
        if not guest_arch:
            test.error("Could not get architecture dirname of the vm")

        exe_middle_path = ("{name}\\{arch}" if media_type == "iso" else
                           "{arch}\\{name}").format(name=guest_name,
                                                    arch=guest_arch)
        exe_file_name = "virtiofs.exe"
        exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"'
        exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path)
        exe_path = session.cmd(exe_find_cmd).strip()
        logging.info("Found exe file '%s'", exe_path)
        return exe_path

    def get_stdev(file):
        """
        Get file's st_dev value.
        """
        stdev = session.cmd_output(cmd_get_stdev % file).strip()
        logging.info("%s device id is %s.", file, stdev)
        return stdev

    # data io config
    test_file = params.get('test_file')
    folder_test = params.get('folder_test')
    cmd_dd = params.get('cmd_dd')
    cmd_md5 = params.get('cmd_md5')
    cmd_new_folder = params.get('cmd_new_folder')
    cmd_copy_file = params.get('cmd_copy_file')
    cmd_rename_folder = params.get('cmd_rename_folder')
    cmd_check_folder = params.get('cmd_check_folder')
    cmd_del_folder = params.get('cmd_del_folder')

    # soft link config
    cmd_symblic_file = params.get('cmd_symblic_file')
    cmd_symblic_folder = params.get('cmd_symblic_folder')

    # pjdfs test config
    cmd_pjdfstest = params.get('cmd_pjdfstest')
    cmd_unpack = params.get('cmd_unpack')
    cmd_yum_deps = params.get('cmd_yum_deps')
    cmd_autoreconf = params.get('cmd_autoreconf')
    cmd_configure = params.get('cmd_configure')
    cmd_make = params.get('cmd_make')
    pjdfstest_pkg = params.get('pjdfstest_pkg')
    username = params.get('username')
    password = params.get('password')
    port = params.get('file_transfer_port')

    # fio config
    fio_options = params.get('fio_options')
    io_timeout = params.get_numeric('io_timeout')

    # xfstest config
    cmd_xfstest = params.get('cmd_xfstest')
    fs_dest_fs2 = params.get('fs_dest_fs2')
    cmd_download_xfstest = params.get('cmd_download_xfstest')
    cmd_yum_install = params.get('cmd_yum_install')
    cmd_make_xfs = params.get('cmd_make_xfs')
    cmd_setenv = params.get('cmd_setenv')
    cmd_setenv_nfs = params.get('cmd_setenv_nfs', '')
    cmd_useradd = params.get('cmd_useradd')
    fs_dest_fs1 = params.get('fs_dest_fs1')
    cmd_get_tmpfs = params.get('cmd_get_tmpfs')
    cmd_set_tmpfs = params.get('cmd_set_tmpfs')
    size_mem1 = params.get('size_mem1')

    # nfs config
    setup_local_nfs = params.get('setup_local_nfs')

    setup_hugepages = params.get("setup_hugepages", "no") == "yes"

    # st_dev check config
    cmd_get_stdev = params.get("cmd_get_stdev")
    nfs_mount_dst_name = params.get("nfs_mount_dst_name")
    if cmd_xfstest and not setup_hugepages:
        # /dev/shm is the default memory-backend-file; its default size is half
        # of the host memory. Increase it to the guest memory size to avoid crashes
        ori_tmpfs_size = process.run(cmd_get_tmpfs,
                                     shell=True).stdout_text.replace("\n", "")
        logging.debug("original tmpfs size is %s", ori_tmpfs_size)
        params["post_command"] = cmd_set_tmpfs % ori_tmpfs_size
        params["pre_command"] = cmd_set_tmpfs % size_mem1

    if setup_local_nfs:
        for fs in params.objects("filesystems"):
            nfs_params = params.object_params(fs)

            params["export_dir"] = nfs_params.get("export_dir")
            params["nfs_mount_src"] = nfs_params.get("nfs_mount_src")
            params["nfs_mount_dir"] = nfs_params.get("fs_source_dir")
            if cmd_get_stdev:
                fs_source_dir = nfs_params.get("fs_source_dir")
                params["nfs_mount_dir"] = os.path.join(fs_source_dir,
                                                       nfs_mount_dst_name)
            nfs_local = nfs.Nfs(params)
            nfs_local.setup()

    try:
        if cmd_xfstest or setup_local_nfs or setup_hugepages:
            params["start_vm"] = "yes"
            env_process.preprocess(test, params, env)

        os_type = params.get("os_type")
        vm = env.get_vm(params.get("main_vm"))
        vm.verify_alive()
        session = vm.wait_for_login()
        host_addr = vm.get_address()

        if os_type == "windows":
            cmd_timeout = params.get_numeric("cmd_timeout", 120)
            driver_name = params["driver_name"]
            install_path = params["install_path"]
            check_installed_cmd = params["check_installed_cmd"] % install_path

            # Check whether the windows driver is running, and enable driver verifier
            session = utils_test.qemu.windrv_check_running_verifier(
                session, vm, test, driver_name)
            # install winfsp tool
            error_context.context("Install winfsp for windows guest.",
                                  logging.info)
            installed = session.cmd_status(check_installed_cmd) == 0
            if installed:
                logging.info("Winfsp tool is already installed.")
            else:
                install_cmd = utils_misc.set_winutils_letter(
                    session, params["install_cmd"])
                session.cmd(install_cmd, cmd_timeout)
                if not utils_misc.wait_for(
                        lambda: not session.cmd_status(check_installed_cmd),
                        60):
                    test.error("Winfsp tool is not installed.")

        for fs in params.objects("filesystems"):
            fs_params = params.object_params(fs)
            fs_target = fs_params.get("fs_target")
            fs_dest = fs_params.get("fs_dest")

            fs_source = fs_params.get("fs_source_dir")
            base_dir = fs_params.get("fs_source_base_dir",
                                     data_dir.get_data_dir())
            if not os.path.isabs(fs_source):
                fs_source = os.path.join(base_dir, fs_source)

            host_data = os.path.join(fs_source, test_file)

            if os_type == "linux":
                error_context.context(
                    "Create a destination directory %s "
                    "inside guest." % fs_dest, logging.info)
                utils_misc.make_dirs(fs_dest, session)
                if not cmd_xfstest:
                    error_context.context(
                        "Mount virtiofs target %s to %s inside"
                        " guest." % (fs_target, fs_dest), logging.info)
                    if not utils_disk.mount(
                            fs_target, fs_dest, 'virtiofs', session=session):
                        test.fail('Mount virtiofs target failed.')

            else:
                error_context.context("Start virtiofs service in guest.",
                                      logging.info)
                viofs_sc_create_cmd = params["viofs_sc_create_cmd"]
                viofs_sc_start_cmd = params["viofs_sc_start_cmd"]
                viofs_sc_query_cmd = params["viofs_sc_query_cmd"]

                logging.info("Check if virtiofs service is registered.")
                status, output = session.cmd_status_output(viofs_sc_query_cmd)
                if "not exist as an installed service" in output:
                    logging.info("Register virtiofs service in windows guest.")
                    exe_path = get_viofs_exe(session)
                    viofs_sc_create_cmd = viofs_sc_create_cmd % exe_path
                    sc_create_s, sc_create_o = session.cmd_status_output(
                        viofs_sc_create_cmd)
                    if sc_create_s != 0:
                        test.fail(
                            "Failed to register virtiofs service, output is %s"
                            % sc_create_o)

                logging.info("Check if virtiofs service is started.")
                status, output = session.cmd_status_output(viofs_sc_query_cmd)
                if "RUNNING" not in output:
                    logging.info("Start virtiofs service.")
                    sc_start_s, sc_start_o = session.cmd_status_output(
                        viofs_sc_start_cmd)
                    if sc_start_s != 0:
                        test.fail(
                            "Failed to start virtiofs service, output is %s" %
                            sc_start_o)
                else:
                    logging.info("Virtiofs service is running.")

                viofs_log_file_cmd = params.get("viofs_log_file_cmd")
                if viofs_log_file_cmd:
                    error_context.context("Check if LOG file is created.",
                                          logging.info)
                    log_dir_s = session.cmd_status(viofs_log_file_cmd)
                    if log_dir_s != 0:
                        test.fail("Virtiofs log is not created.")

                # get fs dest for vm
                virtio_fs_disk_label = fs_target
                error_context.context(
                    "Get volume letter of virtio fs target, the disk "
                    "label is %s." % virtio_fs_disk_label, logging.info)
                vol_con = "VolumeName='%s'" % virtio_fs_disk_label
                volume_letter = utils_misc.wait_for(
                    lambda: utils_misc.get_win_disk_vol(session,
                                                        condition=vol_con),
                    cmd_timeout)
                if volume_letter is None:
                    test.fail("Could not get virtio-fs mounted volume letter.")
                fs_dest = "%s:" % volume_letter

            guest_file = os.path.join(fs_dest, test_file)
            logging.info("The guest file in shared dir is %s", guest_file)

            try:
                if cmd_dd:
                    error_context.context(
                        "Creating file under %s inside "
                        "guest." % fs_dest, logging.info)
                    session.cmd(cmd_dd % guest_file, io_timeout)

                    if os_type == "linux":
                        cmd_md5_vm = cmd_md5 % guest_file
                    else:
                        guest_file_win = guest_file.replace("/", "\\")
                        cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win)
                    md5_guest = session.cmd_output(
                        cmd_md5_vm, io_timeout).strip().split()[0]

                    logging.info(md5_guest)
                    md5_host = process.run(
                        "md5sum %s" % host_data,
                        io_timeout).stdout_text.strip().split()[0]
                    if md5_guest != md5_host:
                        test.fail(
                            'The md5 value of host is not same to guest.')

                if folder_test == 'yes':
                    error_context.context(
                        "Folder test under %s inside "
                        "guest." % fs_dest, logging.info)
                    session.cmd(cmd_new_folder % fs_dest)
                    try:
                        session.cmd(cmd_copy_file)
                        session.cmd(cmd_rename_folder)
                        session.cmd(cmd_del_folder)
                        status = session.cmd_status(cmd_check_folder)
                        if status == 0:
                            test.fail("The folder are not deleted.")
                    finally:
                        if os_type == "linux":
                            session.cmd("cd -")

                if cmd_symblic_file:
                    error_context.context(
                        "Symbolic test under %s inside "
                        "guest." % fs_dest, logging.info)
                    session.cmd(cmd_new_folder % fs_dest)
                    if session.cmd_status(cmd_symblic_file):
                        test.fail("Creating symbolic files failed.")
                    if session.cmd_status(cmd_symblic_folder):
                        test.fail("Creating symbolic folders failed.")
                    if os_type == "linux":
                        session.cmd("cd -")

                if fio_options:
                    error_context.context("Run fio on %s." % fs_dest,
                                          logging.info)
                    fio = generate_instance(params, vm, 'fio')
                    try:
                        fio.run(fio_options % guest_file, io_timeout)
                    finally:
                        fio.clean()
                    vm.verify_dmesg()

                if cmd_pjdfstest:
                    error_context.context("Run pjdfstest on %s." % fs_dest,
                                          logging.info)
                    host_path = os.path.join(
                        data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg)
                    scp_to_remote(host_addr, port, username, password,
                                  host_path, fs_dest)
                    session.cmd(cmd_unpack.format(fs_dest), 180)
                    session.cmd(cmd_yum_deps, 180)
                    session.cmd(cmd_autoreconf % fs_dest, 180)
                    session.cmd(cmd_configure.format(fs_dest), 180)
                    session.cmd(cmd_make % fs_dest, io_timeout)
                    status, output = session.cmd_status_output(
                        cmd_pjdfstest % fs_dest, io_timeout)
                    if status != 0:
                        logging.info(output)
                        test.fail('The pjdfstest failed.')

                if cmd_xfstest:
                    error_context.context("Run xfstest on guest.",
                                          logging.info)
                    utils_misc.make_dirs(fs_dest_fs2, session)
                    if session.cmd_status(cmd_download_xfstest, 360):
                        test.error("Failed to download xfstests-dev")
                    session.cmd(cmd_yum_install, 180)

                    # Due to the increase of xfstests-dev cases, more time is
                    # needed for compilation here.
                    status, output = session.cmd_status_output(
                        cmd_make_xfs, 900)
                    if status != 0:
                        logging.info(output)
                        test.error("Failed to build xfstests-dev")
                    session.cmd(cmd_setenv, 180)
                    session.cmd(cmd_setenv_nfs, 180)
                    session.cmd(cmd_useradd, 180)

                    try:
                        output = session.cmd_output(cmd_xfstest, io_timeout)
                        logging.info("%s", output)
                        if 'Failed' in output:
                            test.fail('The xfstest failed.')
                        else:
                            break
                    except (aexpect.ShellStatusError,
                            aexpect.ShellTimeoutError):
                        test.fail('The xfstest failed.')

                if cmd_get_stdev:
                    error_context.context(
                        "Create files in local device and"
                        " nfs device ", logging.info)
                    file_in_local_host = os.path.join(fs_source, "file_test")
                    file_in_nfs_host = os.path.join(fs_source,
                                                    nfs_mount_dst_name,
                                                    "file_test")
                    cmd_touch_file = "touch %s && touch %s" % (
                        file_in_local_host, file_in_nfs_host)
                    process.run(cmd_touch_file)
                    error_context.context(
                        "Check if the two files' st_dev are"
                        " the same on guest.", logging.info)
                    file_in_local_guest = os.path.join(fs_dest, "file_test")
                    file_in_nfs_guest = os.path.join(fs_dest,
                                                     nfs_mount_dst_name,
                                                     "file_test")
                    if get_stdev(file_in_local_guest) == get_stdev(
                            file_in_nfs_guest):
                        test.fail("st_dev are the same on diffrent device.")
            finally:
                if os_type == "linux":
                    utils_disk.umount(fs_target,
                                      fs_dest,
                                      'virtiofs',
                                      session=session)
                    utils_misc.safe_rmdir(fs_dest, session=session)
    finally:
        if setup_local_nfs:
            if vm.is_alive():
                vm.destroy()
            for fs in params.objects("filesystems"):
                nfs_params = params.object_params(fs)
                params["export_dir"] = nfs_params.get("export_dir")
                params["nfs_mount_dir"] = nfs_params.get("fs_source_dir")
                params["rm_export_dir"] = nfs_params.get("export_dir")
                params["rm_mount_dir"] = nfs_params.get("fs_source_dir")
                if cmd_get_stdev:
                    fs_source_dir = nfs_params.get("fs_source_dir")
                    params["nfs_mount_dir"] = os.path.join(
                        fs_source_dir, nfs_mount_dst_name)
                nfs_local = nfs.Nfs(params)
                nfs_local.cleanup()
                utils_misc.safe_rmdir(params["export_dir"])

    # While all the virtio fs targets are mounted, reboot the vm
    if params.get('reboot_guest', 'no') == 'yes':

        def get_vfsd_num():
            """
            Get virtiofsd daemon number during vm boot up.
            :return: virtiofsd daemon count.
            """
            cmd_ps_virtiofsd = params.get('cmd_ps_virtiofsd')
            vfsd_num = 0
            for device in vm.devices:
                if isinstance(device, qdevices.QVirtioFSDev):
                    sock_path = device.get_param('sock_path')
                    # Format a fresh copy of the template so it survives
                    # iterating over multiple virtiofs devices.
                    vfsd_ps = process.system_output(
                        cmd_ps_virtiofsd % sock_path, shell=True)
                    vfsd_num += len(vfsd_ps.strip().splitlines())
            return vfsd_num

        error_context.context("Check virtiofs daemon before reboot vm.",
                              logging.info)

        vfsd_num_bf = get_vfsd_num()
        error_context.context("Reboot guest and check virtiofs daemon.",
                              logging.info)
        vm.reboot()
        if not vm.is_alive():
            test.fail("After rebooting vm quit unexpectedly.")
        vfsd_num_af = get_vfsd_num()

        if vfsd_num_bf != vfsd_num_af:
            test.fail(
                "Virtiofs daemon count differs before and after reboot.\n"
                "Before reboot: %s\n"
                "After reboot: %s" % (vfsd_num_bf, vfsd_num_af))
Code example #51
0
File: win_heavyload.py Project: tsk-lieacui/tp-qemu
def run(test, params, env):
    """
    KVM guest stop test:
    1) Log into a guest
    2) Check is HeavyLoad.exe installed , download and
       install it if not installed.
    3) Start Heavyload to make guest in heavyload
    4) Check vm is alive
    5) Stop heavyload process and clean temp file.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def loop_session_cmd(session, cmd):
        def session_cmd(session, cmd):
            try:
                return session.cmd_status(cmd) == 0
            except (aexpect.ShellStatusError, aexpect.ShellTimeoutError):
                pass

        count = 0
        while count < 3:
            ret = session_cmd(session, cmd)
            if ret is not None:
                return ret
            count += 1
        return None

    def add_option(cmd, key, val):
        """
        Append options into command;
        """
        if re.match(r".*/%s.*", cmd, re.I):
            if val:
                rex = r"/%s\b+\S+\b+" % key
                val = "/%s %s " % (key, val)
                cmd = re.sub(rex, val, cmd, re.I)
        else:
            cmd += " /%s %s " % (key, val)
        return cmd

    tmp_dir = data_dir.get_tmp_dir()
    install_path = params["install_path"].rstrip("\\")
    heavyload_bin = '"%s\heavyload.exe"' % install_path
    start_cmd = "%s /CPU /MEMORY /FILE " % heavyload_bin
    stop_cmd = "taskkill /T /F /IM heavyload.exe"
    stop_cmd = params.get("stop_cmd", stop_cmd)
    start_cmd = params.get("start_cmd", start_cmd)
    check_running_cmd = "tasklist|findstr /I heavyload"
    check_running_cmd = params.get("check_running_cmd", check_running_cmd)
    test_installed_cmd = 'dir "%s"|findstr /I heavyload' % install_path
    test_installed_cmd = params.get("check_installed_cmd", test_installed_cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    installed = session.cmd_status(test_installed_cmd) == 0
    if not installed:
        download_url = params.get("download_url")
        if download_url:
            dst = r"c:\\"
            pkg_md5sum = params["pkg_md5sum"]
            error_context.context("Download HeavyLoadSetup.exe", logging.info)
            pkg = utils.unmap_url_cache(tmp_dir, download_url, pkg_md5sum)
            vm.copy_files_to(pkg, dst)
        else:
            dst = r"%s:\\" % utils_misc.get_winutils_vol(session)

        error_context.context("Install HeavyLoad in guest", logging.info)
        install_cmd = params["install_cmd"]
        install_cmd = re.sub(r"DRIVE:\\+", dst, install_cmd)
        session.cmd(install_cmd)
        config_cmd = params.get("config_cmd")
        if config_cmd:
            session.cmd(config_cmd)

    error_context.context("Start heavyload in guest", logging.info)
    # generate the heavyload command automatically
    if params.get("autostress") == "yes":
        free_mem = utils_misc.get_free_mem(session, "windows")
        free_disk = utils_misc.get_free_disk(session, "C:")
        start_cmd = '"%s\heavyload.exe"' % params["install_path"]
        start_cmd = add_option(start_cmd, 'CPU', params["smp"])
        start_cmd = add_option(start_cmd, 'MEMORY', free_mem)
        start_cmd = add_option(start_cmd, 'FILE', free_disk)
    else:
        start_cmd = params["start_cmd"]
    # reformat command to ensure heavyload starts as expected
    test_timeout = int(params.get("timeout", "60"))
    stepping = 60
    if test_timeout < 60:
        logging.warning(
            "Heavyload uses minutes as its timeout unit; the given timeout "
            "(%ss) is too small, forcing it to 60s", test_timeout)
        test_timeout = 60
        stepping = 30
    start_cmd = add_option(start_cmd, 'DURATION', test_timeout // 60)
    start_cmd = add_option(start_cmd, 'START', '')
    start_cmd = add_option(start_cmd, 'AUTOEXIT', '')
    logging.info("heavyload cmd: %s" % start_cmd)
    session.sendline(start_cmd)
    if not loop_session_cmd(session, check_running_cmd):
        test.error("heavyload process is not started")

    sleep_before_migration = int(params.get("sleep_before_migration", "0"))
    time.sleep(sleep_before_migration)

    error_context.context("Verify vm is alive", logging.info)
    utils_misc.wait_for(vm.verify_alive,
                        timeout=test_timeout * 1.2,
                        step=stepping)

    if not session.cmd_status(check_running_cmd):
        test.fail("heavyload doesn't exist normally")
    if session:
        session.close()
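# For reference, a sketch (hypothetical values, not from any particular run)
# of the command line the autostress branch above ends up sending to the
# guest:
#
#   "C:\Program Files\heavyload\heavyload.exe" /CPU 2 /MEMORY 1024
#       /FILE 2048 /DURATION 1 /START /AUTOEXIT
#
# /DURATION is given in minutes; /MEMORY and /FILE take the free memory and
# free disk amounts probed from the guest.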
コード例 #52
0
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def check_rng_xml(xml_set, exists=True):
        """
        Check rng xml in/not in domain xml
        :param xml_set: rng xml object for setting
        :param exists: Check xml exists or not in domain xml

        :return: boolean
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        # check whether the xml attrs match the expected values
        try:
            rng_index = xml_devices.index(rng_devices[0])
            xml_get = xml_devices[rng_index]

            if not exists:
                # should be detach device check
                return False
        except IndexError:
            if exists:
                # should be attach device check
                return False
            else:
                logging.info("Can not find rng xml as expected")
                return True

        def get_compare_values(xml_set, xml_get, rng_attr):
            """
            Get the set and get values to compare

            :param xml_set: setting xml object
            :param xml_get: getting xml object
            :param rng_attr: attribute of rng device
            :return: set and get value in xml
            """
            try:
                set_value = xml_set[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                set_value = None
            try:
                get_value = xml_get[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                get_value = None
            logging.debug(
                "xml_set value of %s is %s; xml_get value is %s",
                rng_attr, set_value, get_value)
            return (set_value, get_value)

        match = True
        for rng_attr in xml_set.__slots__:
            set_value, get_value = get_compare_values(xml_set, xml_get,
                                                      rng_attr)
            logging.debug("rng_attr=%s, set_value=%s, get_value=%s", rng_attr,
                          set_value, get_value)
            if set_value and set_value != get_value:
                if rng_attr == 'backend':
                    for bak_attr in xml_set.backend.__slots__:
                        set_backend, get_backend = get_compare_values(
                            xml_set.backend, xml_get.backend, bak_attr)
                        if set_backend and set_backend != get_backend:
                            if bak_attr == 'source':
                                set_source = xml_set.backend.source
                                get_source = xml_get.backend.source
                                find = False
                                for i in range(len(set_source)):
                                    for j in get_source:
                                        if set(set_source[i].items()).issubset(
                                                j.items()):
                                            find = True
                                            break
                                    if not find:
                                        logging.debug(
                                            "set source(%s) not in get source(%s)",
                                            set_source[i], get_source)
                                        match = False
                                        break
                                    else:
                                        continue
                            else:
                                logging.debug(
                                    "set backend(%s)- %s not equal to get backend-%s",
                                    rng_attr, set_backend, get_backend)
                                match = False
                                break
                        else:
                            continue
                        if not match:
                            break
                else:
                    logging.debug("set value(%s)-%s not equal to get value-%s",
                                  rng_attr, set_value, get_value)
                    match = False
                    break
            else:
                continue
            if not match:
                break

        if match:
            logging.info("Find same rng xml as hotpluged")
        else:
            test.fail("Rng xml in VM not same with attached xml")

        return True

    def modify_rng_xml(dparams, sync=True, get_xml=False):
        """
        Build rng xml and optionally apply it to the domain

        :param dparams: parameters used to organize the rng xml
        :param sync: whether to sync to the domain xml; if get_xml is True,
                     sync takes no effect
        :param get_xml: whether to return the device xml instead of applying
        :return: the rng xml object if get_xml=True
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source", "").split()
        backend_protocol = dparams.get("backend_protocol")
        rng_alias = dparams.get("rng_alias")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [
                ast.literal_eval(source) for source in backend_source_list
            ]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        if detach_alias:
            rng_xml.alias = dict(name=rng_alias)

        logging.debug("Rng xml: %s", rng_xml)
        if get_xml:
            return rng_xml
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>", str(rng_xml),
                                      re.M)[0].replace("/", "\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source", "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'" %
                    (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device %s'" % dparams.get("rng_device"))
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'" %
                    (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't see rng option" " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout_text.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" % ret.stdout_text)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1, debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(vm_name,
                                    ("%s %s" % (snapshot_name, options)),
                                    debug=True)
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name, debug=True)
        if ret.stdout.strip().count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only" % snapshot_name2,
                                       debug=True)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name, debug=True)
        if not ret.stdout.strip().count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest_dump(session, exists=True):
        """
        Check guest with hexdump

        :param session: ssh session to guest
        :param exists: check rng device exists/not exists
        """
        check_cmd = "hexdump /dev/hwrng"
        try:
            status = session.cmd_status(check_cmd, 5)

            if status != 0 and exists:
                test.fail("Fail to check hexdump in guest")
            elif not exists:
                logging.info("hexdump cmd failed as expected")
        except aexpect.exceptions.ShellTimeoutError:
            if not exists:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("Hexdump do not fail with error")

    def check_guest(session, expect_fail=False):
        """
        Check random device on guest

        :param session: ssh session to guest
        :param expect_fail: expect the dd cmd pass or fail
        """
        rng_files = ("/sys/devices/virtual/misc/hw_random/rng_available",
                     "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=timeout).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=timeout).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        rng_rate = params.get("rng_rate")
        # For rng rate test this command and return in a short time
        # but for other test it will hang
        cmd = ("dd if=/dev/hwrng of=rng.test count=100" " && rm -f rng.test")
        try:
            ret, output = session.cmd_status_output(cmd, timeout=timeout)
            if ret and expect_fail:
                logging.info("dd cmd failed as expected")
            elif ret:
                test.fail("Failed to read the random device")
        except aexpect.exceptions.ShellTimeoutError:
            logging.info("dd cmd timeout")
            # Close session as the current session still hang on last cmd
            session.close()
            session = vm.wait_for_login()

            if expect_fail:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("dd cmd do not fail with error")
                # Check if file have data
                size = session.cmd_output("wc -c rng.test").split()[0]
                if int(size) > 0:
                    logging.info("/dev/hwrng is not empty, size %s", size)
                else:
                    test.fail("/dev/hwrng is empty")
        finally:
            session.cmd("rm -f rng.test")

        if rng_rate:
            rate_bytes, rate_period = list(ast.literal_eval(rng_rate).values())
            rate_conf = float(rate_bytes) / (float(rate_period) / 1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s", output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s", rate_real,
                          rate_conf)
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.cancel("Multiple virtio-rng devices are not"
                            " supported on this guest kernel. "
                            "Bug: https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=timeout):
                test.fail("Failed to read the random device")

    def get_rng_device(guest_arch, rng_model):
        """
        Return the expected rng device in qemu cmd
        :param guest_arch: e.g. x86_64
        :param rng_model: the value for //rng@model, e.g. "virtio"
        :return: expected device type in qemu cmd
        """
        if "virtio" in rng_model:
            return "virtio-rng-pci" if "s390x" not in guest_arch else "virtio-rng-ccw"
        else:
            test.fail("Unknown rng model %s" % rng_model)

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_guest_dump = "yes" == params.get("test_guest_dump", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running", "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))
    detach_alias = "yes" == params.get("rng_detach_alias", "no")
    detach_alias_options = params.get("rng_detach_alias_options")
    attach_rng = "yes" == params.get("rng_attach_device", "no")
    attach_options = params.get("rng_attach_options", "")
    random_source = "yes" == params.get("rng_random_source", "yes")
    timeout = int(params.get("timeout", 600))
    wait_timeout = int(params.get("wait_timeout", 60))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.skip("Multiple virtio-rng devices not "
                  "supported on this libvirt version")

    guest_arch = params.get("vm_arch_name", "x86_64")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Try to install rng-tools on the host; it can speed up the random rate.
    # If installation fails, ignore the error and continue the test.
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s" %
                        rngd_srv_conf,
                        shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None

        # Prepare xml, make sure no extra rng dev.
        vmxml = vmxml_backup.copy()
        vmxml.remove_all_device_by_type('rng')
        vmxml.sync()
        logging.debug("Prepared vm xml without rng dev is %s", vmxml)

        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name, debug=True)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in range(device_num):
                rng_model = params.get("rng_model_%s" % i, "virtio")
                dparams[i] = {"rng_model": rng_model}
                dparams[i].update({
                    "backend_model":
                    params.get("backend_model_%s" % i, "random")
                })
                dparams[i].update(
                    {"rng_device": get_rng_device(guest_arch, rng_model)})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            params.update({
                "rng_device":
                get_rng_device(guest_arch, params.get("rng_model", "virtio"))
            })

            if detach_alias:
                device_alias = "ua-" + str(uuid.uuid4())
                params.update({"rng_alias": device_alias})

            rng_xml = modify_rng_xml(params, not test_snapshot, attach_rng)

        try:
            # Add random server
            if random_source and params.get(
                    "backend_type") == "tcp" and not test_guest_dump:
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)

            vm.start()
            if attach_rng:
                ret = virsh.attach_device(vm_name,
                                          rng_xml.xml,
                                          flagstr=attach_options,
                                          wait_remove_event=True,
                                          debug=True,
                                          ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
                if not check_rng_xml(rng_xml, True):
                    test.fail("Can not find rng device in xml")

            else:
                # Start the VM.
                if start_error:
                    test.fail("VM started unexpectedly")

            if test_qemu_cmd and not attach_rng:
                if device_num > 1:
                    for i in range(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            if test_guest_dump:
                if params.get("backend_type") == "tcp":
                    cmd = "cat /dev/random | nc -4 localhost 1024"
                    bgjob = utils_misc.AsyncJob(cmd)
                check_guest_dump(session, True)
            if test_snapshot:
                check_snapshot(bgjob)

            if detach_alias:
                result = virsh.detach_device_alias(vm_name,
                                                   device_alias,
                                                   detach_alias_options,
                                                   debug=True)
                if "--config" in detach_alias_options:
                    vm.destroy()

                def have_rng_xml():
                    """
                    check if xml have rng item
                    """
                    output = virsh.dumpxml(vm_name)
                    return not output.stdout.strip().count("<rng model=")

                if utils_misc.wait_for(have_rng_xml, wait_timeout):
                    logging.info("Cannot find rng device in xml after detach")
                else:
                    test.fail("Found rng device in xml after detach")

            # Detach after attach
            if attach_rng:
                ret = virsh.detach_device(vm_name,
                                          rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True,
                                          ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if utils_misc.wait_for(lambda: check_rng_xml(rng_xml, False),
                                       wait_timeout):
                    logging.info("Find same rng xml as hotpluged")
                else:
                    test.fail("Rng device still exists after detach!")

                if test_guest_dump:
                    check_guest_dump(session, False)

            session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name, debug=True)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name,
                                      snapshot,
                                      "--metadata",
                                      debug=True)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
コード例 #53
0
    def add_device(pci_num, queues=1):
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)

        try:
            # get the function for adding the device.
            add_function = local_functions["%s_%s" % (cmd_type, pci_type)]
        except Exception:
            test.error("No function for adding '%s' dev with '%s'" %
                       (pci_type, cmd_type))
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num, queues)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                o = session.cmd_output(reference_cmd)
                return o != reference

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output(params.get("find_pci_cmd"))
                output = [line.strip() for line in output.splitlines()]
                ref = [line.strip() for line in reference.splitlines()]
                output = [_ for _ in output if _ not in ref]
                output = "\n".join(output)
                if re.search(params.get("match_string"), output, re.I | re.M):
                    return True
                return False

            error_context.context("Start checking new added device",
                                  logging.info)
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                test.fail("No new PCI device shown after executing "
                          "monitor command: 'info pci'")

            secs = int(params.get("wait_secs_for_hook_up", 3))
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                test.fail("No new device shown in output of command "
                          "executed inside the guest: %s" % reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                test.fail("PCI %s %s device not found in guest. "
                          "Command was: %s" %
                          (pci_model, pci_type, params.get("find_pci_cmd")))

            # Test the newly added device
            try:
                if params.get("pci_test_cmd"):
                    test_cmd = re.sub("PCI_NUM", "%s" % (pci_num + 1),
                                      params.get("pci_test_cmd"))
                    session.cmd(test_cmd, timeout=disk_op_timeout)
            except aexpect.ShellError as e:
                test.fail("Check for %s device failed after PCI "
                          "hotplug. Output: %r" % (pci_type, e.output))

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise
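# The two helpers above detect hotplug by diffing command output captured
# before and after the operation. A self-contained sketch of the same idea
# (hypothetical lspci snapshots):
before = ["00:01.0 Ethernet controller", "00:02.0 VGA controller"]
after = ["00:01.0 Ethernet controller", "00:02.0 VGA controller",
         "00:03.0 SCSI storage controller"]
new_lines = [line for line in after if line not in before]
assert new_lines == ["00:03.0 SCSI storage controller"]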
コード例 #54
0
def run(test, params, env):
    """
    While the guest is using the interface, delete it.

    1) Boot a guest with network card.
    2) Ping host from guest, should return successfully.
    3) In host, disable the interface which the guest uses.
    4) Ping host would fail.
    5) Enable the interface again, ping would work.
    6) Remove the interface from host,
       qemu would not crash, the guest would not crash too.
    7) Shutdown guest, and repeat step1 to step2. Guest would recover.
    """
    secs_after_iplink_action = 3
    login_timeout = int(params.get("login_timeout", 360))

    # Step 1: Boot a guest
    vm = env.get_vm(params.get("main_vm"))
    vm.verify_alive()

    error_context.context("Login to guest", logging.info)
    vm.wait_for_login(timeout=login_timeout)

    # Step 2, ping should work
    guest_ip = vm.get_address()
    error_context.context("Get the guest ip %s" % guest_ip, logging.info)

    error_context.context("Ping test from host to guest, should work",
                          logging.info)
    status, output = utils_test.ping(guest_ip, 30, timeout=20)
    if status != 0:
        test.fail("Ping failed, status: %s, output: %s" % (status, output))

    host_ifname_name = vm.get_ifname()
    error_context.context("Get interface name: %s. " % host_ifname_name,
                          logging.info)
    host_ifname = utils_net.Interface(host_ifname_name)

    # Step 3,4, disable interface and ping should fail
    error_context.context("Set interface %s down." % host_ifname_name,
                          logging.info)
    host_ifname.down()
    time.sleep(secs_after_iplink_action)

    error_context.context(
        "After disabling the interface, "
        "ping from host to guest should fail.", logging.info)
    status, output = utils_test.ping(guest_ip, 30, timeout=20)
    if status == 0:
        test.fail("Ping should fail, status: %s, output: %s" %
                  (status, output))

    # Step 5, enable interface, ping should work
    error_context.context("Set interface %s up." % host_ifname_name,
                          logging.info)
    host_ifname.up()
    time.sleep(secs_after_iplink_action)

    error_context.context(
        "After enabling the interface, "
        "ping from host to guest should work.", logging.info)
    status, output = utils_test.ping(guest_ip, 30, timeout=20)
    if status != 0:
        test.fail("Ping should work, status: %s, output: %s" %
                  (status, output))

    # Step 6, delete the interface, qemu should not crash,
    # ping should fail
    error_context.context("Delete the interface %s." % host_ifname_name,
                          logging.info)
    host_ifname.dellink()
    time.sleep(secs_after_iplink_action)

    error_context.context(
        "After deleting the interface, "
        "VM and qemu should not crash, and ping should fail.", logging.info)
    vm.verify_alive()
    status, output = utils_test.ping(guest_ip, 30, timeout=20)
    if status == 0:
        test.fail("Ping should fail, status: %s, output: %s" %
                  (status, output))

    # Step 7, shutdown guest, and restart a guest
    error_context.context("Shutdown the VM.", logging.info)
    session = vm.wait_for_serial_login()
    shutdown_cmd = params.get("shutdown_command", "shutdown")
    logging.debug("Shutdown guest with command %s" % shutdown_cmd)
    session.sendline(shutdown_cmd)

    error_context.context("Waiting VM to go down", logging.info)

    if not utils_misc.wait_for(vm.is_dead, 360, 0, 1):
        test.fail("Guest refuses to go down")
    env_process.preprocess_vm(test, params, env, params.get("main_vm"))

    # Repeat step 1: Boot a guest
    vm = env.get_vm(params.get("main_vm"))
    vm.verify_alive()

    error_context.context("Login to guest", logging.info)
    vm.wait_for_login(timeout=login_timeout)

    guest_ip = vm.get_address()
    error_context.context("Get the guest ip %s" % guest_ip, logging.info)

    # Repeat step 2, ping should work
    error_context.context("Ping test from host to guest, should work",
                          logging.info)
    status, output = utils_test.ping(guest_ip, 30, timeout=20)
    if status != 0:
        test.fail("Ping failed, status: %s, output: %s" % (status, output))
コード例 #55
0
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There are no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exists" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass the "
                                           "virtual disk to the VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Fail to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        devices = vmxml.devices
        devices.append(new_disk)
        vmxml.devices = devices
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with an internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s",
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happened: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
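# mkfs_and_mount(), create_file_in_vm() and get_file_in_vm() are module
# helpers not shown in this excerpt; a rough sketch of what they are
# assumed to do, matching the calls above (hypothetical implementations):
def mkfs_and_mount(session, mount_disk):
    # Make a filesystem on the new disk and mount it at /mnt in the guest.
    session.cmd_status("mkfs.ext4 %s" % mount_disk)
    session.cmd_status("mount %s /mnt" % mount_disk)

def create_file_in_vm(session, path, content):
    # Write a marker file whose content is checked after snapshot revert.
    session.cmd_status("echo %s > %s" % (content, path))
# get_file_in_vm() similarly reads the file back and returns an
# (exists, content) pair.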
コード例 #56
0
ファイル: yonit_bitmap.py プロジェクト: zixi-chen/tp-qemu
def run(test, params, env):
    """
    Run yonit bitmap benchmark in Windows guests, especially win7 32bit,
    for regression test of BZ #556455.

    Run the benchmark (infinite) loop background using
    run_guest_test_background, and detect the existence of the process
    in guest.

      1. If the process exits before test timeout, that means the benchmark
      exits unexpectedly, and BSOD may have happened, which can be verified
      from the screenshot saved by virt-test.
      2. If just timeout happen, this test passes, i.e. the guest stays
      good while running the benchmark in the given time.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    sec_per_day = 86400  # seconds per day
    test_timeout = int(params.get("test_timeout", sec_per_day))
    login_timeout = int(params.get("login_timeout", 360))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=login_timeout)

    # Since the benchmark runs into an infinite loop, the background process
    # will never return, unless we get a BSOD.
    #
    # We set the test_timeout of the background guest_test much bigger than
    # that of this test to make sure that the background benchmark is still
    # running while the foreground detection is going on.
    error_context.context("run benchmark test in background", logging.info)
    params["test_timeout"] = test_timeout * 2 + sec_per_day
    logging.info("set Yonit bitmap test timeout to"
                 " %ss" % params["test_timeout"])
    pid = guest_test.run_guest_test_background(test, params, env)
    if pid < 0:
        session.close()
        test.error("Could not create child process to execute "
                   "guest_test background")

    def is_yonit_benchmark_launched():
        if session.cmd_status(
                'tasklist | find /I "compress_benchmark_loop"') != 0:
            logging.debug("yonit bitmap benchmark was not found")
            return False
        return True

    error_context.context(
        "Watching Yonit bitmap benchmark is"
        " running until timeout", logging.info)
    try:
        # Start detecting whether the benchmark is started a few mins
        # after the background test launched, as the downloading
        # will take some time.
        launch_timeout = login_timeout
        if utils_misc.wait_for(is_yonit_benchmark_launched, launch_timeout,
                               180, 5):
            logging.debug("Yonit bitmap benchmark was launched successfully")
        else:
            test.error("Failed to launch yonit bitmap benchmark")

        # If the benchmark exits before timeout, errors happened.
        if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(),
                               test_timeout, 60, 10):
            test.error("Yonit bitmap benchmark exits unexpectly")
        else:
            if session.is_responsive():
                logging.info("Guest stays good until test timeout")
            else:
                test.fail("Guest is dead")
    finally:
        logging.info("Kill the background benchmark tracking process")
        utils_misc.safe_kill(pid, signal.SIGKILL)
        guest_test.wait_guest_test_background(pid)
        session.close()
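# Both checks above lean on utils_misc.wait_for(func, timeout, first, step),
# which polls func until it returns something truthy or the timeout expires.
# A simplified sketch of that contract (the real helper lives in avocado-vt
# and also accepts a descriptive text argument):
import time

def wait_for(func, timeout, first=0.0, step=1.0):
    time.sleep(first)
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None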
コード例 #57
0
    def netperf_test():
        """
        Netperf stress test between two guests.
        """
        n_client = utils_netperf.NetperfClient(
            addresses[0],
            params["client_path"],
            netperf_source=os.path.join(data_dir.get_deps_dir("netperf"),
                                        params.get("netperf_client_link")),
            client=params.get("shell_client"),
            port=params.get("shell_port"),
            prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"),
            username=params.get("username"),
            password=params.get("password"),
            linesep=params.get("shell_linesep",
                               "\n").encode().decode('unicode_escape'),
            status_test_command=params.get("status_test_command", ""),
            compile_option=params.get("compile_option_client", ""))

        n_server = utils_netperf.NetperfServer(
            addresses[1],
            params["server_path"],
            netperf_source=os.path.join(data_dir.get_deps_dir("netperf"),
                                        params.get("netperf_server_link")),
            username=params.get("username"),
            password=params.get("password"),
            client=params.get("shell_client"),
            port=params.get("shell_port"),
            prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"),
            linesep=params.get("shell_linesep",
                               "\n").encode().decode('unicode_escape'),
            status_test_command=params.get("status_test_command", "echo $?"),
            compile_option=params.get("compile_option_server", ""))

        try:
            n_server.start()
            # Run netperf with message size defined in range.
            netperf_test_duration = params.get_numeric("netperf_test_duration")
            test_protocols = params.get("test_protocols", "TCP_STREAM")
            netperf_output_unit = params.get("netperf_output_unit")
            test_option = params.get("test_option", "")
            test_option += " -l %s" % netperf_test_duration
            if netperf_output_unit in "GMKgmk":
                test_option += " -f %s" % netperf_output_unit
            t_option = "%s -t %s" % (test_option, test_protocols)
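            # With hypothetical params (duration 60, output unit "m",
            # protocol TCP_STREAM), t_option ends up as:
            #   " -l 60 -f m -t TCP_STREAM"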
            n_client.bg_start(addresses[1],
                              t_option,
                              params.get_numeric("netperf_para_sessions"),
                              params.get("netperf_cmd_prefix", ""),
                              package_sizes=params.get("netperf_sizes"))
            if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1,
                                   "Wait netperf test start"):
                logging.info("Netperf test start successfully.")
            else:
                test.error("Can not start netperf client.")
            utils_misc.wait_for(
                lambda: not n_client.is_netperf_running(),
                netperf_test_duration, 0, 5,
                "Wait netperf test finish %ss" % netperf_test_duration)
        finally:
            n_server.stop()
            n_server.cleanup(True)
            n_client.cleanup(True)
コード例 #58
0
ファイル: watchdog.py プロジェクト: zhenyzha/tp-libvirt
    def confirm_guest_status():
        """
        Confirm the guest status after watchdog triggered
        """
        def _booting_completed():
            session = vm.wait_for_login()
            status = None
            second_boot_time = None
            try:
                status, second_boot_time = session.cmd_status_output(
                    "uptime --since")
                logging.debug("The second boot time is %s", second_boot_time)
            except (aexpect.ShellStatusError,
                    aexpect.ShellProcessTerminatedError) as e:
                logging.error("Exception caught:%s", e)

            session.close()
            return second_boot_time > first_boot_time
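        # Note on the comparison above: "uptime --since" prints the boot time
        # as e.g. "2021-03-02 10:23:11"; the format is ISO-ordered, so a plain
        # string comparison with first_boot_time is enough to detect a reboot.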

        def _inject_nmi():
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s",
                              dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180,
                                       10):
                test.fail("Guest not shutdown after watchdog triggered")
            else:
                logging.debug(
                    "Guest is in shutdown state after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
            else:
                logging.debug("Guest is rebooted after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                logging.debug(
                    "Guest is in paused status after watchdog triggered.")
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail("The domstate is not correct after "
                              "watchdog triggered")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none":
            if utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest shutdown unexpectedly")
            else:
                logging.debug(
                    "Guest is not in shutoff state since watchdog action is none."
                )
        elif action == "inject-nmi":
            if model != "diag288" and not utils_misc.wait_for(
                    _inject_nmi, 180, 10):
                test.fail(
                    "Guest did not receive inject-nmi after watchdog triggered")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            else:
                logging.debug(
                    "Guest received inject-nmi and the inject-nmi watchdog "
                    "event has been caught.")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(
                    lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail(
                    "No auto core dump file found after watchdog triggered")
            else:
                logging.debug(
                    "VM core has been dumped after watchdog triggered.")
Code example #59
0
def run_timerdevice_clock_drift_with_ntp(test, params, env):
    """
    Timer device check clock frequency offset using NTP on CPU starved guest:

    1) Check for an appropriate clocksource on host.
    2) Boot the guest.
    3) Copy time-warp-test.c to guest.
    4) Compile the time-warp-test.c.
    5) Stop ntpd and apply load on guest.
    6) Pin every vcpu to a physical cpu.
    7) Verify each vcpu is pinned on host.
    8) Run time-warp-test on guest.
    9) Start ntpd on guest.
    10) Check the drift in /var/lib/ntp/drift file on guest after hours
        of running.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def _drift_file_exist():
        try:
            session.cmd("test -f /var/lib/ntp/drift")
            return True
        except Exception:
            return False

    error.context("Check for an appropriate clocksource on host", logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    if not "tsc" in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Boot the guest", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    sess_guest_load = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    sess_guest_load.cmd(cmd)

    error.context("Stop ntpd and apply load on guest", logging.info)
    sess_guest_load.cmd("yum install -y ntp; service ntpd stop")
    load_cmd = "for ((I=0; I<`grep 'processor id' /proc/cpuinfo| wc -l`; I++));"
    load_cmd += " do taskset -c $I /bin/bash -c 'for ((;;)); do X=1; done &';"
    load_cmd += " done"
    sess_guest_load.sendline(load_cmd)

    error.context("Pin every vcpu to a physical cpu", logging.info)
    host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
    host_cpu_num = utils.system_output(host_cpu_cnt_cmd).strip()
    host_cpu_list = range(int(host_cpu_num))
    cpu_pin_list = list(zip(vm.vcpu_threads, host_cpu_list))
    if len(cpu_pin_list) < len(vm.vcpu_threads):
        raise error.TestNAError("There isn't enough physical cpu to"
                                " pin all the vcpus")
    for vcpu, pcpu in cpu_pin_list:
        utils.system("taskset -p -c %s %s" % (pcpu, vcpu))

    error.context("Verify each vcpu is pinned on host", logging.info)

    error.context("Run time-warp-test", logging.info)
    session = vm.wait_for_login(timeout=timeout)
    cmd = "/tmp/time-warp-test > /dev/null &"
    session.sendline(cmd)

    error.context("Start ntpd on guest", logging.info)
    cmd = "service ntpd start; sleep 1; echo"
    session.cmd(cmd)

    error.context("Check if the drift file exists on guest", logging.info)
    test_run_timeout = float(params["test_run_timeout"])
    if not utils_misc.wait_for(_drift_file_exist, test_run_timeout, step=5):
        raise error.TestError("Timed out waiting for the creation of"
                              " the /var/lib/ntp/drift file")
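Step 10 from the docstring is not shown in this listing. A minimal sketch of that final check, assuming the session is still open and an arbitrary 10 ppm acceptance threshold (both are assumptions):

    # /var/lib/ntp/drift holds a single frequency offset value in ppm
    drift = float(session.cmd_output("cat /var/lib/ntp/drift").strip())
    if abs(drift) > 10:
        raise error.TestFail("Clock frequency offset too large: %s ppm"
                             % drift)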
Code example #60
0
def run(test, params, env):
    """
    Test for vhba hostdev passthrough.
    1. Create a vhba
    2. Prepare hostdev xml for a lun device of the newly created vhba
    3.1 If hot attach, attach-device the hostdev xml to the vm
    3.2 If cold attach, add the hostdev to the vm xml and start it
    4. Log in to the vm and check the attached disk
    5. Detach-device the hostdev xml
    6. Log in to the vm and check the partitions
    """
    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param vm: VM guest.
        :param target: Disk dev in VM.
        :return: True if the check succeeds, False otherwise.
        """
        try:

            def get_attached_disk():
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                session.close()
                added_parts = list(set(new_parts).difference(set(old_parts)))
                return added_parts

            added_parts = utils_misc.wait_for(get_attached_disk, _TIMEOUT)
            logging.info("Added parts: %s", added_parts)
            # wait_for returns None on timeout, so guard before len()
            if not added_parts or len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            # use a fresh session; the one opened in run() may be stale
            session = vm.wait_for_login()
            try:
                cmd_status, cmd_output = session.cmd_status_output(cmd)
            except Exception as detail:
                test.error("Error occurred when running '%s': %s"
                           % (cmd, detail))
            logging.info("Check disk operation in VM:\n%s", cmd_output)
            session.close()
            if cmd_status != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError,
                aexpect.ShellError) as detail:
            logging.error(str(detail))
            return False

    try:
        status_error = "yes" == params.get("status_error", "no")
        vm_name = params.get("main_vm", "avocado-vt-vm1")
        device_target = params.get("hostdev_disk_target", "hdb")
        scsi_wwnn = params.get("scsi_wwnn", "ENTER.YOUR.WWNN")
        scsi_wwpn = params.get("scsi_wwpn", "ENTER.YOUR.WWPN")
        attach_method = params.get('attach_method', 'hot')
        vm = env.get_vm(vm_name)
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        virsh_dargs = {'debug': True, 'ignore_status': True}
        new_vhbas = []

        if scsi_wwnn.count("ENTER.YOUR.WWNN") or \
                scsi_wwpn.count("ENTER.YOUR.WWPN"):
            test.cancel("You didn't provide proper wwpn/wwnn")
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        old_parts = utils_disk.get_parts_list(session)
        # find the first online hba
        online_hbas = utils_npiv.find_hbas("hba")
        if not online_hbas:
            test.cancel("NO ONLINE HBAs!")
        first_online_hba = online_hbas[0]
        # create vhba based on the first online hba
        old_vhbas = utils_npiv.find_hbas("vhba")
        logging.debug("Original online vHBAs: %s", old_vhbas)
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": first_online_hba,
            "scsi_wwnn": scsi_wwnn,
            "scsi_wwpn": scsi_wwpn
        })
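        # For reference, the node-device XML such a helper presumably
        # renders, following libvirt's documented vHBA format (a sketch;
        # utils_npiv may differ in detail):
        #   <device>
        #     <parent>scsi_host5</parent>
        #     <capability type='scsi_host'>
        #       <capability type='fc_host'>
        #         <wwnn>2001001b32a9da4e</wwnn>
        #         <wwpn>2101001b32a9da4e</wwpn>
        #       </capability>
        #     </capability>
        #   </device>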
        # enable multipath service
        process.run("mpathconf --enable", shell=True)
        if not utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT):
            test.fail("vhba not successfully created")
        new_vhbas.append(new_vhba)
        # find the first available lun of the newly created vhba;
        # wait_for hands back the discovered luns, or None on timeout
        lun_dicts = utils_misc.wait_for(
            lambda: utils_npiv.find_scsi_luns(new_vhba), timeout=_TIMEOUT)
        if not lun_dicts:
            test.fail("There is no available lun storage for "
                      "wwpn: %s, please check your wwns or "
                      "contact IT admins" % scsi_wwpn)
        logging.debug("The luns discovered are: %s", lun_dicts)
        first_lun = lun_dicts[0]
        # prepare hostdev xml for the first lun
        kwargs = {
            'addr_bus': first_lun['bus'],
            'addr_target': first_lun['target'],
            'addr_unit': first_lun['unit']
        }

        new_hostdev_xml = utils_npiv.create_hostdev_xml(
            adapter_name="scsi_host" + first_lun['scsi'], **kwargs)
        logging.info("New hostdev xml as follow:")
        logging.info(new_hostdev_xml)
        new_hostdev_xml.xmltreefile.write()
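        # The rendered hostdev XML should resemble libvirt's SCSI
        # passthrough format (a sketch; the adapter name and address
        # values come from the lun discovered above):
        #   <hostdev mode='subsystem' type='scsi'>
        #     <source>
        #       <adapter name='scsi_host6'/>
        #       <address bus='0' target='0' unit='0'/>
        #     </source>
        #   </hostdev>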
        if attach_method == "hot":
            # attach-device the lun's hostdev xml to guest vm
            result = virsh.attach_device(vm_name, new_hostdev_xml.xml)
            libvirt.check_exit_status(result, status_error)
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            # list.append returns None, so mutate a copy and reassign it
            devices = vmxml.devices
            devices.append(new_hostdev_xml)
            vmxml.devices = devices
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            logging.debug("The new vm's xml is: \n%s", vmxml)

        # login vm and check the disk
        check_result = check_in_vm(vm, device_target, old_parts)
        if not check_result:
            test.fail("check disk in vm failed")
        result = virsh.detach_device(vm_name, new_hostdev_xml.xml)
        libvirt.check_exit_status(result, status_error)
        # login vm and check disk actually removed
        # check_in_vm closed its session, so open a new one
        session = vm.wait_for_login()
        parts_after_detach = utils_disk.get_parts_list(session)
        old_parts.sort()
        parts_after_detach.sort()
        if parts_after_detach == old_parts:
            logging.info("hostdev successfully detached.")
        else:
            test.fail("Device not successfully detached. "
                      "Still existing in vm's /proc/partitions")
    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        # recover vm
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)