Example #1
    def cleanup(self):
        error_context.context("performing qemu_io cleanup", logging.debug)
        if os.path.isfile(self.lvtest_device):
            process.run("fuser -k %s" % self.lvtest_device)
            time.sleep(2)
        l_result = process.run("lvdisplay")
        # Let's remove all volumes inside the volume group created
        if self.lvtest_name in l_result.stdout:
            process.run("lvremove -f %s" % self.lvtest_device)
        # Now, removing the volume group itself
        v_result = process.run("vgdisplay")
        if self.vgtest_name in v_result.stdout:
            process.run("vgremove -f %s" % self.vgtest_name)
        # Now, if we can, let's remove the physical volume from lvm list
        p_result = process.run("pvdisplay")
        l_result = process.run('losetup -a')
        for l in self.loopback:
            if l in p_result.stdout:
                process.run("pvremove -f %s" % l)
            if l in l_result.stdout:
                try:
                    process.run("losetup -d %s" % l)
                except process.CmdError as e:
                    logging.error("Failed to liberate loopback %s, "
                                  "error msg: '%s'", l, e)

        for f in self.raw_files:
            if os.path.isfile(f):
                os.remove(f)
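
For context, a minimal sketch of the LVM-on-loopback setup that this cleanup tears down, using the same process.run helper as above; device paths, names and sizes are hypothetical:

    # Hypothetical setup mirrored by the cleanup above; names and sizes are examples.
    process.run("losetup /dev/loop0 /tmp/lvtest_raw_0")   # back a loop device with a raw file
    process.run("pvcreate /dev/loop0")                    # physical volume on the loop device
    process.run("vgcreate vgtest /dev/loop0")             # volume group on top of the PV
    process.run("lvcreate -L 1G -n lvtest vgtest")        # logical volume removed first above
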
Example #2
def run(test, params, env):
    """
    Balloon negative test: balloon Windows guest memory down to a very small value.
    1) boot a guest with balloon device.
    2) enable and check driver verifier in guest.
    3) evict guest memory to 10M.
    4) repeat step 3 for many times.
    5) check guest free memory.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """

    error_context.context("Boot guest with balloon device", logging.info)
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()
    driver_name = params.get("driver_name", "balloon")
    session = utils_test.qemu.windrv_check_running_verifier(session, vm,
                                                            test, driver_name)
    balloon_test = BallooningTestWin(test, params, env)
    expect_mem = int(params["expect_memory"])
    repeat_times = int(params.get("repeat_times", 10))

    while repeat_times:
        try:
            balloon_test.vm.balloon(expect_mem)
        except QMPEventError:
            pass
        balloon_test._balloon_post_action()
        time.sleep(30)
        repeat_times -= 1
    ballooned_memory = balloon_test.ori_mem - expect_mem
    balloon_test.memory_check("after balloon guest memory 10 times", ballooned_memory)
    session.close()
Example #3
    def memory_check(vm, get_polling_output, keyname):
        """
        Check memory status.

        :param vm: VM object.
        :param get_polling_output: output of get polling in qmp.
        :param keyname: key name of the output of the 'qom-get' property.
        """
        error_context.context("Check whether memory status as expected",
                              logging.info)
        check_mem_ratio = float(params.get("check_mem_ratio", 0.1))
        mem_base = MemoryBaseTest(test, params, env)
        if keyname == "stat-free-memory":
            guest_mem = mem_base.get_guest_free_mem(vm)
        elif keyname == "stat-total-memory":
            guest_mem = mem_base.get_vm_mem(vm)

        stat_memory_qmp = get_polling_output['stats'][keyname]
        stat_memory_qmp = "%sB" % stat_memory_qmp
        stat_memory_qmp = int(float(utils_misc.normalize_data_size(
                                   (stat_memory_qmp), order_magnitude="M")))
        if (abs(guest_mem - stat_memory_qmp)) > (guest_mem * check_mem_ratio):
            raise exceptions.TestFail("%s of guest %s is not equal to %s in"
                                      " qmp, the ratio is %s" % (keyname,
                                                                 guest_mem,
                                                                 stat_memory_qmp,
                                                                 check_mem_ratio))
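
A standalone sketch of the byte-to-megabyte normalization performed above, with a hypothetical QMP value:

    # The QMP stat arrives in bytes; memory_check compares it in MB.
    stat_free_bytes = 3221225472                  # hypothetical 'stat-free-memory' value
    stat_free_mb = int(stat_free_bytes / (1024 ** 2))
    print(stat_free_mb)                           # -> 3072, compared against the guest figure
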
 def query_status(self):
     """
     Query running block mirroring job info.
     """
     error_context.context("query job status", logging.info)
     if not self.get_status():
         self.test.fail("No active job")
Example #5
    def balloon_memory(session, device_path):
        """
        Doing memory balloon in a loop and check memory status during balloon.

        :param session: VM session.
        :param device_path: balloon polling path.
        """
        repeat_times = int(params.get("repeat_times", 5))
        logging.info("repeat times: %d" % repeat_times)
        balloon_test = BallooningTestWin(test, params, env)

        while repeat_times:
            for tag in params.objects('test_tags'):
                error_context.context("Running %s test" % tag, logging.info)
                params_tag = params.object_params(tag)
                balloon_type = params_tag['balloon_type']
                min_sz, max_sz = balloon_test.get_memory_boundary(balloon_type)
                expect_mem = int(random.uniform(min_sz, max_sz))

                quit_after_test = balloon_test.run_ballooning_test(expect_mem, tag)
                get_polling_output = vm.monitor.qom_get(device_path, get_balloon_property)
                memory_check(vm, get_polling_output, 'stat-free-memory')
                if quit_after_test:
                    return

            balloon_test.reset_memory()
            repeat_times -= 1
Example #6
 def plug(self):
     hotplug_cmd = self.params.get("cpu_hotplug_cmd", "")
     for cpu in range(self.smp, self.maxcpus, self.vcpu_threads):
         error_context.context("hot-pluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
         time.sleep(1)
     utils_misc.check_if_vm_vcpu_match(240, self.vm)
Example #7
def _setup_hugepage(params):
    """
    Set up the host configuration:
     1. Check the size of hugepage on host.
     2. Calculate the num of assigning pages by (assigning total size /
        hugepage size).
     3. Set hugepage by executing "echo $num > /proc/sys/vm/nr_hugepages".
     4. Mount this hugepage to /mnt/kvm_hugepage.
    """
    size = params['total_hugepage_size']
    huge_page = test_setup.HugePageConfig(params)
    error_context.context('Assign %sMB hugepages in host.' % size, logging.info)

    hugepage_size = huge_page.get_hugepage_size()
    logging.debug('Hugepage size is %skB in host.' % hugepage_size)

    huge_page.target_hugepages = int((int(size) * 1024) // hugepage_size)
    logging.debug('Set hugepages to %d pages in host.'
                  % huge_page.target_hugepages)
    huge_page.set_node_num_huge_pages(huge_page.target_hugepages,
                                      0, hugepage_size)

    error_context.context('mount hugepages to %s'
                          % huge_page.hugepage_path, logging.info)
    huge_page.mount_hugepage_fs()
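
A minimal sketch of the page-count arithmetic from step 2 of the docstring, with hypothetical host values:

    total_hugepage_size_mb = 1024    # requested total size in MB (params['total_hugepage_size'])
    hugepage_size_kb = 2048          # typical value reported by get_hugepage_size() on x86_64
    target_hugepages = (total_hugepage_size_mb * 1024) // hugepage_size_kb
    print(target_hugepages)          # -> 512 pages written to nr_hugepages
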
 def verify_guest_clock_source(session, expected):
     error_context.context("Check the current clocksource in guest",
                           logging.info)
     cmd = "cat /sys/devices/system/clocksource/"
     cmd += "clocksource0/current_clocksource"
     if expected not in session.cmd(cmd):
         test.fail("Guest didn't use '%s' clocksource" % expected)
 def start_suspend(self, **args):
     """
     Start suspend via qemu guest agent.
     """
     error_context.context("Suspend guest via guest agent", logging.info)
     if self.guest_agent:
         self.guest_agent.suspend(self.suspend_mode)
def create_gluster_uri(params, stripped=False):
    """
    Find/create gluster volume
    """
    vol_name = params.get("gluster_volume_name")

    error_context.context("Host name lookup failed")
    hostname = socket.gethostname()
    gluster_server = params.get("gluster_server")
    gluster_port = params.get("gluster_port", "0")
    if not gluster_server:
        gluster_server = hostname
    if not gluster_server or gluster_server == "(none)":
        if_up = utils_net.get_net_if(state="UP")
        ip_addr = utils_net.get_net_if_addrs(if_up[0])["ipv4"][0]
        gluster_server = ip_addr

    # Start the gluster daemon, if not started
    # Building gluster uri
    gluster_uri = None
    if stripped:
        gluster_uri = "%s:/%s" % (gluster_server, vol_name)
    else:
        gluster_uri = "gluster://%s:%s/%s/" % (gluster_server, gluster_port,
                                               vol_name)
    return gluster_uri
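
Illustrative return values, assuming a hypothetical server "gluster-host" and volume "vol0":

    # create_gluster_uri(params, stripped=True)  -> "gluster-host:/vol0"
    # create_gluster_uri(params, stripped=False) -> "gluster://gluster-host:0/vol0/"
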
Example #11
    def run_test(qemu_src_dir):
        """
        run QEMU I/O test suite

        :param qemu_src_dir: path of the qemu source code
        """
        iotests_root = params.get("iotests_root", "tests/qemu-iotests")
        extra_options = params.get("qemu_io_extra_options", "")
        image_format = params.get("qemu_io_image_format")
        result_pattern = params.get("iotests_result_pattern")
        error_context.context("running qemu-iotests for image format %s"
                              % image_format, logging.info)
        os.environ["QEMU_PROG"] = utils_misc.get_qemu_binary(params)
        os.environ["QEMU_IMG_PROG"] = utils_misc.get_qemu_img_binary(params)
        os.environ["QEMU_IO_PROG"] = utils_misc.get_qemu_io_binary(params)
        os.environ["QEMU_NBD_PROG"] = utils_misc.get_binary('qemu-nbd', params)
        os.chdir(os.path.join(qemu_src_dir, iotests_root))
        cmd = './check'
        if extra_options:
            cmd += " %s" % extra_options
        cmd += " -%s" % image_format
        output = process.system_output(cmd, ignore_status=True, shell=True)
        match = re.search(result_pattern, output, re.I | re.M)
        if match:
            iotests_log_file = "qemu_iotests_%s.log" % image_format
            iotests_log_file = utils_misc.get_path(test.debugdir, iotests_log_file)
            with open(iotests_log_file, 'w+') as log:
                log.write(output)
                log.flush()
            msg = "Total test %s cases, %s failed"
            raise exceptions.TestFail(msg % (match.group(2), match.group(1)))
Example #12
    def ethtool_set(session, status):
        """
        Set ethernet device offload status

        :param status: new offload status to be set
        """
        txt = "Set offload status for device "
        txt += "'%s': %s" % (ethname, str(status))
        error_context.context(txt, logging.info)

        cmd = "ethtool -K %s " % ethname
        cmd += " ".join([o + ' ' + s for o, s in status.items()])
        err_msg = "Failed to set offload status for device '%s'" % ethname
        try:
            session.cmd_output_safe(cmd)
        except aexpect.ShellCmdError as e:
            logging.error("%s, detail: %s", err_msg, e)
            return False

        curr_status = dict((k, v) for k, v in ethtool_get(session).items()
                           if k in status.keys())
        if curr_status != status:
            logging.error("%s, got: '%s', expect: '%s'", err_msg,
                          str(curr_status), str(status))
            return False

        return True
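
A small sketch of the ethtool command assembled above; the device name and offload map below are assumptions:

    ethname = "eth0"
    status = {"tso": "off", "gso": "on"}
    cmd = "ethtool -K %s " % ethname
    cmd += " ".join([o + ' ' + s for o, s in status.items()])
    print(cmd)    # -> "ethtool -K eth0 tso off gso on"
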
def create_gluster_vol(params):
    vol_name = params.get("gluster_volume_name")
    force = params.get('force_recreate_gluster') == "yes"

    brick_path = params.get("gluster_brick")
    if not os.path.isabs(brick_path):  # do nothing when path is absolute
        base_dir = params.get("images_base_dir", data_dir.get_data_dir())
        brick_path = os.path.join(base_dir, brick_path)

    error_context.context("Host name lookup failed")
    hostname = socket.gethostname()
    if not hostname or hostname == "(none)":
        if_up = utils_net.get_net_if(state="UP")
        for i in if_up:
            ipv4_value = utils_net.get_net_if_addrs(i)["ipv4"]
            logging.debug("ipv4_value is %s", ipv4_value)
            if ipv4_value != []:
                ip_addr = ipv4_value[0]
                break
        hostname = ip_addr

    # Start the gluster daemon, if not started
    glusterd_start()
    # Check for the volume is already present, if not create one.
    if not is_gluster_vol_avail(vol_name) or force:
        return gluster_vol_create(vol_name, hostname, brick_path, force)
    else:
        return True
 def check_interrupt(session, vectors):
     error_context.context("Check the cpu interrupt of virito",
                           logging.info)
     vectors = int(vectors)
     irq_check_cmd = params["irq_check_cmd"]
     output = session.cmd_output(irq_check_cmd).strip()
     if vectors == 0 or vectors == 1:
         if not (re.findall("IO-APIC.*fasteoi|XICS.*Level|XIVE.*Level",
                            output)):
             msg = "Could not find interrupt controller for virito device"
             msg += " when vectors = %d" % vectors
             test.fail(msg)
     elif 2 <= vectors <= 8:
         if not re.findall("virtio[0-9]-virtqueues", output):
             msg = "Could not find the virtio device for MSI-X interrupt"
             msg += " when vectors = %d " % vectors
             msg += "Command %s got output %s" % (irq_check_cmd, output)
             test.fail(msg)
     elif vectors == 9 or vectors == 10:
         if not (re.findall("virtio[0-9]-input", output) and
                 re.findall("virtio[0-9]-output", output)):
             msg = "Could not find the virtio device for MSI-X interrupt"
             msg += " when vectors = %d " % vectors
             msg += "Command %s got output %s" % (irq_check_cmd, output)
             test.fail(msg)
    def start_stress(session):
        """
        Load stress in guest.
        """
        error_context.context("Load stress in guest", logging.info)
        stress_type = params.get("stress_type", "none")

        if stress_type == "none":
            return

        if stress_type == "netperf":
            bg = ""
            bg_stress_test = params.get("run_bgstress")

            bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test,
                                              (test, params, env),
                                              {"sub_type": bg_stress_test})
            bg.start()

        if stress_type == "io":
            install_stress_app(session)

            cmd = params.get("start_cmd")
            logging.info("Launch stress app in guest with command: '%s'" % cmd)
            session.sendline(cmd)

        running = utils_misc.wait_for(lambda: stress_running(session),
                                      timeout=150, step=5)
        if not running:
            test.error("Stress isn't running")

        logging.info("Stress running now")
Example #16
    def balloon_memory(self, new_mem):
        """
        Balloon memory to new_mem and verify on both the qemu monitor and
        the guest OS that the change worked.

        :param new_mem: New desired memory.
        :type new_mem: int
        """
        self.env["balloon_test"] = 0
        error_context.context("Change VM memory to %s" % new_mem, logging.info)
        try:
            self.vm.balloon(new_mem)
            self.env["balloon_test"] = 1
        except Exception as e:
            if self.params.get('illegal_value_check', 'no') == 'no' and new_mem != self.get_ballooned_memory():
                raise exceptions.TestFail("Balloon memory fail with error"
                                          " message: %s" % e)
        if new_mem > self.ori_mem:
            compare_mem = self.ori_mem
        elif new_mem == 0:
            compare_mem = self.current_mmem
        elif new_mem <= 100:
            self._balloon_post_action()
            self.current_mmem = self.get_ballooned_memory()
            compare_mem = self.current_mmem
        else:
            compare_mem = new_mem

        balloon_timeout = float(self.params.get("balloon_timeout", 480))
        status = utils_misc.wait_for((lambda: compare_mem ==
                                      self.get_ballooned_memory()),
                                     balloon_timeout)
        if status is None:
            raise exceptions.TestFail("Failed to balloon memory to expect"
                                      " value during %ss" % balloon_timeout)
Example #17
    def memory_check(self, step, ballooned_mem):
        """
        Check memory status against expected values

        :param step: the check point string
        :type step: string
        :param ballooned_mem: ballooned memory in current step
        :type ballooned_mem: int
        :return: memory size get from monitor and guest
        :rtype: tuple
        """
        error_context.context("Check memory status %s" % step, logging.info)
        mmem = self.get_ballooned_memory()
        gmem = self.get_memory_status()
        gcompare_threshold = int(self.params.get("guest_compare_threshold", 100))
        # For the Windows illegal-value test: when the guest balloon is set to a
        # value in (1, 100), free memory should be less than 150M.
        if ballooned_mem >= self.ori_mem - 100:
            timeout = float(self.params.get("login_timeout", 600))
            session = self.vm.wait_for_login(timeout=timeout)
            try:
                if self.get_win_mon_free_mem(session) > 150:
                    self.test.fail("Balloon_min test failed %s" % step)
            finally:
                session.close()
        else:
            guest_ballooned_mem = abs(gmem - self.ori_gmem)
            if (abs(mmem - self.ori_mem) != ballooned_mem or
                    (abs(guest_ballooned_mem - ballooned_mem) > gcompare_threshold)):
                self.error_report(step, self.ori_mem - ballooned_mem, mmem, gmem)
                raise exceptions.TestFail("Balloon test failed %s" % step)
        return (mmem, gmem)
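
A worked example of the non-minimum branch above, with hypothetical sizes in MB:

    # ori_mem = 8192, ballooned_mem = 2048, guest_compare_threshold = 100
    # monitor reports mmem = 6144  -> abs(6144 - 8192) == 2048, matches ballooned_mem
    # guest reports gmem = 6010 with ori_gmem = 8050
    #   guest_ballooned_mem = abs(6010 - 8050) = 2040
    #   abs(2040 - 2048) = 8 <= 100  -> within threshold, the check passes
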
Example #18
    def run_bg_stress_test(bg_stress_test):
        """
        Run background test.

        :param bg_stress_test: Background test.
        :return: return the background case thread if it's successful;
                 else raise error.
        """
        error_context.context("Run test %s background" % bg_stress_test,
                              logging.info)
        stress_thread = None
        wait_time = float(params.get("wait_bg_time", 60))
        target_process = params.get("target_process", "")
        bg_stress_run_flag = params.get("bg_stress_run_flag")
        # Need to set bg_stress_run_flag in some cases to make sure all
        # necessary steps are active
        env[bg_stress_run_flag] = False
        stress_thread = utils.InterruptedThread(
            utils_test.run_virt_sub_test, (test, params, env),
            {"sub_type": bg_stress_test})
        stress_thread.start()
        if not utils_misc.wait_for(lambda: check_bg_running(target_process),
                                   120, 0, 1):
            raise exceptions.TestFail("Backgroud test %s is not "
                                      "alive!" % bg_stress_test)
        if params.get("set_bg_stress_flag", "no") == "yes":
            logging.info("Wait %s test start" % bg_stress_test)
            if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag),
                                       wait_time, 0, 0.5):
                err = "Fail to start %s test" % bg_stress_test
                raise exceptions.TestError(err)
        env["bg_status"] = 1
        return stress_thread
Example #19
    def _memory_stats_compare(self, keyname, memory_stat_qmp):
        """
        Check whether the memory statistics from qmp match the guest memory.

        :param keyname: key name of the output of the 'qom-get' property.
        :param memory_stat_qmp: memory stat values from qmp.
        """
        check_mem_ratio = float(self.params.get("check_mem_ratio", 0.1))
        check_mem_diff = float(self.params.get("check_mem_diff", 150))
        error_context.context("Get memory from guest", logging.info)
        if keyname == "stat-free-memory":
            guest_mem = self.get_guest_free_mem(self.vm)
        elif keyname == "stat-total-memory":
            guest_mem = self.get_vm_mem(self.vm)
        memory_stat_qmp = "%sB" % memory_stat_qmp
        memory_stat_qmp = int(float(utils_misc.normalize_data_size(
                                   memory_stat_qmp, order_magnitude="M")))
        mem_diff = float(abs(guest_mem - memory_stat_qmp))
        if ((mem_diff / guest_mem) > check_mem_ratio and
                mem_diff > check_mem_diff):
            self.test.fail("%s of guest %s is not equal to %s in qmp,the"
                           "acceptable ratio/diff is %s/%s" % (keyname,
                                                               guest_mem,
                                                               memory_stat_qmp,
                                                               check_mem_ratio,
                                                               check_mem_diff))
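
A worked example of the acceptance window above, with hypothetical values in MB:

    # guest_mem = 3900, memory_stat_qmp = 4096  -> mem_diff = 196
    # ratio check: 196 / 3900 ~= 0.05, below check_mem_ratio (0.1)
    # diff  check: 196 > check_mem_diff (150)
    # The test only fails when BOTH limits are exceeded, so this pair passes.
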
    def fsthaw(self, check_status=True):
        """
        Thaw File system on guest.

        :param check_status: Force this function to check the fsfreeze status
                             before/after sending cmd.
        :return: Thaw FS number if cmd succeed, -1 if guest agent doesn't
                 support fsfreeze cmd.
        """
        error_context.context("thaw all FS in guest '%s'" % self.vm.name)
        if check_status:
            self.verify_fsfreeze_status(self.FSFREEZE_STATUS_FROZEN)

        cmd = "guest-fsfreeze-thaw"
        if self.check_has_command(cmd):
            ret = self.cmd(cmd=cmd)
            if check_status:
                try:
                    self.verify_fsfreeze_status(self.FSFREEZE_STATUS_THAWED)
                # pylint: disable=E0712
                except VAgentFreezeStatusError:
                    # When the status is incorrect, reset fsfreeze status to
                    # 'thawed'.
                    self.cmd(cmd=cmd)
                    raise
            return ret
        return -1
Example #21
 def verify_info(self, params=None):
     """
      Verify that options are applied to the image file correctly.
     """
     error_context.context("verify option of converted image", logging.info)
     image_filename = storage.get_image_filename(params, self.data_dir)
     info = utils_test.get_image_info(image_filename)
     avalue = evalue = ""
     for option in params.objects("option_verified"):
         avalue = info.get(option)
         if option == "format":
             evalue = params.get("image_format")
         elif option == "lcounts":
             if params.get("lazy_refcounts") == "on":
                 evalue = "true"
             elif params.get("lazy_refcounts") == "off":
                 evalue = "false"
         elif option == "csize":
             csize = params.get("cluster_size")
             evalue = int(float(utils_misc.normalize_data_size(csize, "B")))
         elif option == "sparse_size":
             if info.get("dsize") < info.get("vsize"):
                 avalue = info.get("dsize")
                 evalue = info.get("vsize")
         else:
             evalue = params.get(option)
         if avalue is not None and avalue != evalue:
             msg = "Get wrong %s from image %s!" % (option, image_filename)
             msg += "Expect: %s, actual: %s" % (evalue, avalue)
             self.test.fail(msg)
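
Hypothetical expectations the loop above would derive for a qcow2 target image (values purely illustrative):

    # option "format"  -> evalue "qcow2"   (from params.get("image_format"))
    # option "lcounts" -> evalue "true"    (lazy_refcounts=on)
    # option "csize"   -> evalue 65536     (cluster_size "64k" normalized to bytes)
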
def run(test, params, env):
    """
    monitor_cmds_check test:
    1). boot up vm with human and qmp monitor
    2). check commands in black_list are unavailable in the monitor

    :param test: Qemu test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.

    Notes:
        Please run this test with qemu/control.kernel-version to ensure it
        only runs when the required package is installed;
    """

    def is_supported(cmd):
        try:
            vm.monitor.verify_supported_cmd(cmd)
            return True
        except qemu_monitor.MonitorNotSupportedCmdError:
            return False

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    protocol = vm.monitor.protocol

    black_cmds = params.get("black_cmds", "").split()
    error_context.context("Verify black commands are unavaliable in "
                          "'%s' monitor" % protocol, logging.info)
    logging.info("Black commands: %s" % black_cmds)
    cmds = [cmd for cmd in black_cmds if is_supported(cmd)]
    if cmds:
        msg = "Unexpected commands %s found in %s monitor" % (cmds, protocol)
        test.fail(msg)
Example #23
 def cleanup(self, force_stop=False):
     error_context.context("Cleaning up test NFS share", logging.info)
     process.run("umount -l -f %s" % self.mnt_dir, shell=True)
     process.run("exportfs -u %s:%s" % (self.nfs_ip, self.nfs_dir),
                 shell=True)
     if force_stop:
         self.stop_service()
Example #24
def setup_win_driver_verifier(driver, vm, timeout=300):
    """
    Enable driver verifier for windows guest.

    :param driver: The driver which needs enable the verifier.
    :param vm: VM object.
    :param timeout: Timeout in seconds.
    """
    session = vm.wait_for_login(timeout=timeout)
    try:
        verifier_status = _check_driver_verifier(session, driver)[1]
        if not verifier_status:
            error_context.context("Enable %s driver verifier" % driver,
                                  logging.info)
            verifier_setup_cmd = "verifier /standard /driver %s.sys" % driver
            session.cmd(verifier_setup_cmd,
                        timeout=timeout,
                        ignore_all_errors=True)
            session = vm.reboot(session)
            verifier_status, output = _check_driver_verifier(session, driver)
            if not verifier_status:
                msg = "%s verifier is not enabled, details: %s" % (driver,
                                                                   output)
                raise exceptions.TestFail(msg)
        logging.info("%s verifier is enabled already" % driver)
    finally:
        session.close()
Example #25
    def check_memory(self, vm=None):
        """
        Check whether guest memory really matches what is assigned to the VM.

        :param vm: VM object, get VM object from env if vm is None.
        """
        error_context.context("Verify memory info", logging.info)
        if not vm:
            vm = self.env.get_vm(self.params["main_vm"])
        vm.verify_alive()
        threshold = float(self.params.get("threshold", 0.10))
        timeout = float(self.params.get("wait_resume_timeout", 60))
        # Notes:
        #    some sub test will pause VM, here need to wait VM resume
        # then check memory info in guest.
        utils_misc.wait_for(lambda: not vm.is_paused(), timeout=timeout)
        utils_misc.verify_host_dmesg()
        self.os_type = self.params.get("os_type")
        guest_mem_size = super(MemoryHotplugTest, self).get_guest_total_mem(vm)
        vm_mem_size = self.get_vm_mem(vm)
        if abs(guest_mem_size - vm_mem_size) > vm_mem_size * threshold:
            msg = ("Assigned '%s MB' memory to '%s'"
                   "but, '%s MB' memory detect by OS" %
                   (vm_mem_size, vm.name, guest_mem_size))
            raise exceptions.TestFail(msg)
    def virtio_serial_login(self, port='vs1'):
        error_context.context("Try to login guest via '%s'" % port,
                              logging.info)
        username = self.params.get("username")
        password = self.params.get("password")
        prompt = self.params.get("shell_prompt", "[#$]")
        linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
        for vport in self.get_virtio_ports(self.vm)[1]:
            if vport.name == port:
                break
            vport = None
        if not vport:
            self.test.error("Not virtio serial port '%s' found" % port)

        logfile = "serial-%s-%s.log" % (vport.name, self.vm.name)
        socat_cmd = "nc -U %s" % vport.hostfile
        session = aexpect.ShellSession(socat_cmd, auto_close=False,
                                       output_func=utils_misc.log_line,
                                       output_params=(logfile,),
                                       prompt=prompt)
        session.set_linesep(linesep)
        session.sendline()
        self.__sessions__.append(session)
        try:
            remote.handle_prompts(session, username, password, prompt, 180)
            self.test.fail("virtio serial '%s' should no " % port +
                           "channel to login")
        except remote.LoginTimeoutError:
            self.__sessions__.append(session)
            logging.info("Can't login via %s" % port)
        return session
Example #27
 def run_subtest(sub_test):
     """
     Run subtest (e.g. rng_bat, reboot, shutdown) when it's not None
     :param sub_test: subtest name
     """
     error_context.context("Run %s subtest" % sub_test)
     utils_test.run_virt_sub_test(test, params, env, sub_test)
Example #28
    def _get_service_cmds(self):
        """
        Figure out the commands used to control the NFS service.
        """
        error_context.context("Finding out commands to handle NFS service",
                              logging.info)
        service = utils_path.find_command("service")
        try:
            systemctl = utils_path.find_command("systemctl")
        except ValueError:
            systemctl = None

        if systemctl is not None:
            init_script = "/etc/init.d/nfs"
            service_file = "/lib/systemd/system/nfs-server.service"
            if os.path.isfile(init_script):
                service_name = "nfs"
            elif os.path.isfile(service_file):
                service_name = "nfs-server"
            else:
                raise NFSCorruptError("Files %s and %s absent, don't know "
                                      "how to set up NFS for this host" %
                                      (init_script, service_file))
            start_cmd = "%s start %s.service" % (systemctl, service_name)
            stop_cmd = "%s stop %s.service" % (systemctl, service_name)
            restart_cmd = "%s restart %s.service" % (systemctl, service_name)
            status_cmd = "%s status %s.service" % (systemctl, service_name)
        else:
            start_cmd = "%s nfs start" % service
            stop_cmd = "%s nfs stop" % service
            restart_cmd = "%s nfs restart" % service
            status_cmd = "%s nfs status" % service

        return [start_cmd, stop_cmd, restart_cmd, status_cmd]
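
On a systemd host where /lib/systemd/system/nfs-server.service exists, the returned list would look like this (the systemctl path is illustrative):

    # ["/usr/bin/systemctl start nfs-server.service",
    #  "/usr/bin/systemctl stop nfs-server.service",
    #  "/usr/bin/systemctl restart nfs-server.service",
    #  "/usr/bin/systemctl status nfs-server.service"]
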
 def pre_step(self):
     error_context.context("Config guest and reboot it", logging.info)
     pre_cmd = self.params.get("pre_cmd") + self.params.get("pre_cmd_extra")
     session = self.vm.wait_for_login(timeout=360)
     session.cmd(pre_cmd, timeout=240)
     session = self.vm.reboot(session=session, timeout=900, serial=False)
     self.__sessions__.append(session)
def lv_take_snapshot(vg_name, lv_name,
                     lv_snapshot_name, lv_snapshot_size):
    """
    Take a snapshot of the original logical volume.
    """
    error_context.context("Taking snapshot from original logical volume",
                          logging.info)

    if not vg_check(vg_name):
        raise exceptions.TestError("Volume group could not be found")
    if lv_check(vg_name, lv_snapshot_name):
        raise exceptions.TestError("Snapshot already exists")
    if not lv_check(vg_name, lv_name):
        raise exceptions.TestError("Snapshot's origin could not be found")

    cmd = ("lvcreate --size " + lv_snapshot_size + " --snapshot " +
           " --name " + lv_snapshot_name + " /dev/" + vg_name + "/" + lv_name)
    try:
        result = process.run(cmd)
    except process.CmdError as ex:
        if ('Logical volume "%s" already exists in volume group "%s"' %
            (lv_snapshot_name, vg_name) in results_stderr_52lts(ex.result) and
            re.search(re.escape(lv_snapshot_name + " [active]"),
                      results_stdout_52lts(process.run("lvdisplay")))):
            # the above conditions detect if merge of snapshot was postponed
            logging.warning(("Logical volume %s is still active! " +
                             "Attempting to deactivate..."), lv_name)
            lv_reactivate(vg_name, lv_name)
            result = process.run(cmd)
        else:
            raise ex
    logging.info(results_stdout_52lts(result).rstrip())
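
With hypothetical names and size, the snapshot command assembled above expands to:

    # lvcreate --size 1G --snapshot  --name lv_snap /dev/vg_test/lv_test
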
Example #31
 def add_target_data_disks(self):
     """Hot add target disk to VM with qmp monitor"""
     error_context.context("Create target disk")
     for tag in self.target_disks:
         disk = self.__target_disk_define_by_params(self.params, tag)
         disk.hotplug(self.main_vm)
def run(test, params, env):
    """
    Test Step:
        1. Boot up guest using the openvswitch bridge
        2. Set up related services in the test environment (http, ftp etc.) (optional)
        3. Access the service in guest
        4. Setup access control rules in ovs to disable the access
        5. Access the service in guest
        6. Setup access control rules in ovs to enable the access
        7. Access the service in guest
        8. Delete the access control rules in ovs
        9. Access the service in guest

    Params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """
    def access_service(access_sys,
                       access_targets,
                       disabled,
                       host_ip,
                       ref=False):
        err_msg = ""
        err_type = ""
        for asys in access_sys:
            for atgt in access_targets:
                logging.debug("Try to access target %s from %s", atgt, asys)

                access_params = access_sys[asys]
                atgt_disabled = access_params['disabled_%s' % atgt]
                if asys in vms_tags:
                    vm = env.get_vm(asys)
                    session = vm.wait_for_login(timeout=timeout)
                    run_func = session.cmd_status_output
                    remote_src = vm
                    ssh_src_ip = vm.get_address()
                else:
                    run_func = _system_statusoutput
                    remote_src = "localhost"
                    ssh_src_ip = host_ip
                if atgt in vms_tags:
                    vm = env.get_vm(atgt)
                    access_re_sub_string = vm.wait_for_get_address(0)
                else:
                    access_re_sub_string = host_ip

                access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                    access_params['access_cmd'])
                ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                 access_params['ref_cmd'])

                if access_cmd in ["ssh", "telnet"]:
                    if atgt in vms_tags:
                        target_vm = env.get_vm(atgt)
                        target_ip = target_vm.get_address()
                    else:
                        target_vm = "localhost"
                        target_ip = host_ip
                    out = ""
                    out_err = ""
                    try:
                        out = remote_login(access_cmd, target_ip, remote_src,
                                           params, host_ip)
                        stat = 0
                    except remote.LoginError as err:
                        stat = 1
                        out_err = "Failed to login %s " % atgt
                        out_err += "from %s, err: %s" % (asys, err.output)
                    if "TelnetServer" in params.get("setup_cmd_windows", ""):
                        try:
                            out += remote_login(access_cmd, ssh_src_ip,
                                                target_vm, params, host_ip)
                        except remote.LoginError as err:
                            stat += 1
                            out_err += "Failed to login %s " % asys
                            out_err += "from %s, err: %s" % (atgt, err.output)
                    if out_err:
                        out = out_err
                else:
                    try:
                        stat, out = run_func(access_cmd, timeout=op_timeout)
                        check_string = access_params.get("check_from_output")
                        if check_string and check_string in out:
                            stat = 1
                    except aexpect.ShellTimeoutError as err:
                        out = err.output
                        stat = 1
                    except process.CmdError as err:
                        out = err.result.stderr
                        stat = err.result.exit_status

                    if access_params.get("clean_cmd"):
                        try:
                            run_func(access_params['clean_cmd'])
                        except Exception:
                            pass

                if disabled and atgt_disabled and stat == 0:
                    err_msg += "Still can access %s after" % atgt
                    err_msg += " disable it from ovs. "
                    err_msg += "Command: %s " % access_cmd
                    err_msg += "Output: %s" % out
                if disabled and atgt_disabled and stat != 0:
                    logging.debug("Can not access target as expect.")
                if not disabled and stat != 0:
                    if ref:
                        err_msg += "Can not access %s at the" % atgt
                        err_msg += " beginning. Please check your setup."
                        err_type = "ref"
                    else:
                        err_msg += "Still can not access %s" % atgt
                        err_msg += " after enable the access. "
                    err_msg += "Command: %s " % access_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    if err_type == "ref":
                        test.cancel(err_msg)
                    test.fail(err_msg)

                if not ref_cmd:
                    return

                try:
                    stat, out = run_func(ref_cmd, timeout=op_timeout)
                except aexpect.ShellTimeoutError as err:
                    out = err.output
                    stat = 1
                except process.CmdError as err:
                    out = err.result.stderr
                    stat = err.result.exit_status

                if stat != 0:
                    if ref:
                        err_msg += "Reference command failed at beginning."
                        err_type = "ref"
                    else:
                        err_msg += "Reference command failed after setup"
                        err_msg += " the rules. "
                    err_msg += "Command: %s " % ref_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    if err_type == "ref":
                        test.cancel(err_msg)
                    test.fail(err_msg)

    def get_acl_cmd(protocol, in_port, action, extra_options):
        acl_cmd = protocol.strip()
        acl_cmd += ",in_port=%s" % in_port.strip()
        if extra_options.strip():
            acl_cmd += ",%s" % ",".join(extra_options.strip().split())
        if action.strip():
            acl_cmd += ",action=%s" % action.strip()
        return acl_cmd
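
    # Hypothetical illustration of the rule string assembled by get_acl_cmd():
    #   get_acl_cmd("tcp", "5", "drop", "nw_dst=192.168.122.10")
    #   -> "tcp,in_port=5,nw_dst=192.168.122.10,action=drop"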

    def acl_rules_check(acl_rules, acl_setup_cmd):
        acl_setup_cmd = re.sub("action=", "actions=", acl_setup_cmd)
        acl_option = re.split(",", acl_setup_cmd)
        for line in acl_rules.splitlines():
            rule = [_.lower() for _ in re.split("[ ,]", line) if _]
            item_in_rule = 0

            for acl_item in acl_option:
                if acl_item.lower() in rule:
                    item_in_rule += 1

            if item_in_rule == len(acl_option):
                return True
        return False

    def remote_login(client, host, src, params_login, host_ip):
        src_name = src
        if src != "localhost":
            src_name = src.name
        logging.info("Login %s from %s", host, src_name)
        port = params_login["target_port"]
        username = params_login["username"]
        password = params_login["password"]
        prompt = params_login["shell_prompt"]
        linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n"))
        quit_cmd = params.get("quit_cmd", "exit")
        if host == host_ip:
            # Try to login from guest to host.
            prompt = r"^\[.*\][\#\$]\s*$"
            linesep = "\n"
            username = params_login["host_username"]
            password = params_login["host_password"]
            quit_cmd = "exit"

        if client == "ssh":
            # We only support ssh for Linux in this test
            cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                   "-o StrictHostKeyChecking=no "
                   "-o PreferredAuthentications=password -p %s %s@%s" %
                   (port, username, host))
        elif client == "telnet":
            cmd = "telnet -l %s %s %s" % (username, host, port)
        else:
            raise remote.LoginBadClientError(client)

        if src == "localhost":
            logging.debug("Login with command %s", cmd)
            session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
        else:
            if params_login.get("os_type") == "windows":
                if client == "telnet":
                    cmd = "C:\\telnet.py %s %s " % (host, username)
                    cmd += "%s \"%s\" && " % (password, prompt)
                    cmd += "C:\\wait_for_quit.py"
                cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd
            else:
                cmd += " || sleep 5"
            session = src.wait_for_login()
            logging.debug("Sending login command: %s", cmd)
            session.sendline(cmd)
        try:
            out = remote.handle_prompts(session,
                                        username,
                                        password,
                                        prompt,
                                        timeout,
                                        debug=True)
        except Exception as err:
            session.close()
            raise err
        try:
            session.cmd(quit_cmd)
            session.close()
        except Exception:
            pass
        return out

    def setup_service(setup_target):
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = _system_statusoutput
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart")
        prepare_cmd = setup_params.get("prepare_cmd")
        setup_cmd = re.sub("SERVICE", setup_params.get("service", ""),
                           setup_cmd)

        error_context.context(
            "Set up %s service in %s" %
            (setup_params.get("service"), setup_target), logging.info)
        if params.get("copy_ftp_site") and setup_target != "localhost":
            ftp_site = os.path.join(data_dir.get_deps_dir(),
                                    params.get("copy_ftp_site"))
            ftp_dir = params.get("ftp_dir")
            setup_vm.copy_files_to(ftp_site, ftp_dir)
        access_param = setup_params.object_params(setup_target)
        if "ftp" in access_param.get("access_cmd") and os_type == "linux":
            setup_func(
                "sed -i 's/anonymous_enable=NO/anonymous_enable=YES/g' %s" %
                params["vsftpd_conf"])
        if prepare_cmd:
            setup_func(prepare_cmd, timeout=setup_timeout)
        setup_func(setup_cmd, timeout=setup_timeout)
        if setup_target != "localhost":
            setup_session.close()

    def stop_service(setup_target):
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = _system_statusoutput
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop")
        cleanup_cmd = setup_params.get("cleanup_cmd")
        stop_cmd = re.sub("SERVICE", setup_params.get("service", ""), stop_cmd)

        error_context.context(
            "Stop %s service in %s" %
            (setup_params.get("service"), setup_target), logging.info)
        if stop_cmd:
            setup_func(stop_cmd, timeout=setup_timeout)

        if cleanup_cmd:
            setup_func(cleanup_cmd, timeout=setup_timeout)

        if setup_target != "localhost":
            setup_session.close()

    timeout = int(params.get("login_timeout", '360'))
    op_timeout = int(params.get("op_timeout", "360"))
    acl_protocol = params['acl_protocol']
    acl_extra_options = params.get("acl_extra_options", "")

    for vm in env.get_all_vms():
        session = vm.wait_for_login(timeout=timeout)
        if params.get("disable_iptables") == "yes":
            session.cmd_output(
                "systemctl stop firewalld||service firewalld stop")
        if params.get("copy_scripts"):
            root_dir = data_dir.get_root_dir()
            script_dir = os.path.join(root_dir, "shared", "scripts")
            tmp_dir = params.get("tmp_dir", "C:\\")
            for script in params.get("copy_scripts").split():
                script_path = os.path.join(script_dir, script)
                vm.copy_files_to(script_path, tmp_dir)
        if params.get("copy_curl") and params.get("os_type") == "windows":
            curl_win_path = params.get("curl_win_path", "C:\\curl\\")
            session.cmd("dir {0} || mkdir {0}".format(curl_win_path))
            for script in params.get("copy_curl").split():
                curl_win_link = os.path.join(data_dir.get_deps_dir("curl"),
                                             script)
                vm.copy_files_to(curl_win_link, curl_win_path, timeout=60)
        session.close()

    vms_tags = params.objects("vms")
    br_name = params.get("netdst")
    if br_name == "private":
        br_name = params.get("priv_brname", 'atbr0')

    for setup_target in params.get("setup_targets", "").split():
        setup_service(setup_target)

    access_targets = params.get("access_targets", "localhost").split()
    deny_target = params.get("deny_target", "localhost")
    all_target = params.get("extra_target", "").split() + vms_tags
    target_port = params["target_port"]
    vm = env.get_vm(vms_tags[0])
    nic = vm.virtnet[0]
    if_name = nic.ifname
    params_nic = params.object_params("nic1")
    if params["netdst"] == "private":
        params_nic["netdst"] = params_nic.get("priv_brname", "atbr0")
    host_ip = utils_net.get_host_ip_address(params_nic)
    if deny_target in vms_tags:
        deny_vm = env.get_vm(deny_target)
        deny_vm_ip = deny_vm.wait_for_get_address(0)
    elif deny_target == "localhost":
        deny_vm_ip = host_ip
    if "NW_DST" in acl_extra_options:
        acl_extra_options = re.sub("NW_DST", deny_vm_ip, acl_extra_options)
    acl_extra_options = re.sub("TARGET_PORT", target_port, acl_extra_options)

    access_sys = {}
    for target in all_target:
        if target not in access_targets:
            if target in vms_tags:
                os_type = params["os_type"]
            else:
                os_type = "linux"
            os_params = params.object_params(os_type)
            access_param = os_params.object_params(target)
            check_from_output = access_param.get("check_from_output")

            access_sys[target] = {}
            access_sys[target]['access_cmd'] = access_param['access_cmd']
            access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "")
            access_sys[target]['clean_cmd'] = access_param.get(
                'clean_guest', "")
            if check_from_output:
                access_sys[target]['check_from_output'] = check_from_output
            for tgt in access_targets:
                tgt_param = access_param.object_params(tgt)
                acl_disabled = tgt_param.get("acl_disabled") == "yes"
                access_sys[target]['disabled_%s' % tgt] = acl_disabled

    error_context.context("Try to access target before setup the rules",
                          logging.info)
    access_service(access_sys, access_targets, False, host_ip, ref=True)
    error_context.context("Disable the access in ovs", logging.info)
    br_infos = utils_net.openflow_manager(br_name, "show").stdout.decode()
    if_port = re.findall(r"(\d+)\(%s\)" % if_name, br_infos)
    if not if_port:
        test.cancel("Can not find %s in bridge %s" % (if_name, br_name))
    if_port = if_port[0]

    acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options)
    utils_net.openflow_manager(br_name, "add-flow", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name,
                                           "dump-flows").stdout.decode()
    if not acl_rules_check(acl_rules, acl_cmd):
        test.fail("Can not find the rules from ovs-ofctl: %s" % acl_rules)

    error_context.context("Try to acess target to exam the disable rules",
                          logging.info)
    access_service(access_sys, access_targets, True, host_ip)
    error_context.context("Enable the access in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal", acl_extra_options)
    utils_net.openflow_manager(br_name, "mod-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name,
                                           "dump-flows").stdout.decode()
    if not acl_rules_check(acl_rules, acl_cmd):
        test.fail("Can not find the rules from ovs-ofctl: %s" % acl_rules)

    error_context.context("Try to acess target to exam the enable rules",
                          logging.info)
    access_service(access_sys, access_targets, False, host_ip)
    error_context.context("Delete the access rules in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options)
    utils_net.openflow_manager(br_name, "del-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name,
                                           "dump-flows").stdout.decode()
    if acl_rules_check(acl_rules, acl_cmd):
        test.fail("Still can find the rules from ovs-ofctl: %s" % acl_rules)
    error_context.context("Try to acess target to exam after delete the rules",
                          logging.info)
    access_service(access_sys, access_targets, False, host_ip)

    for setup_target in params.get("setup_targets", "").split():
        stop_service(setup_target)
Example #33
def run(test, params, env):
    """
    Test multi disk support of guest, this case will:
    1) Create disks image in configuration file.
    2) Start the guest with those disks.
    3) Checks qtree vs. test params. (Optional)
    4) Create partition on those disks.
    5) Get disk dev filenames in guest.
    6) Format those disks in guest.
    7) Copy file into / out of those disks.
    8. Compare the original file and the copied file using md5 or fc command.
    9) Repeat steps 3-5 if needed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _add_param(name, value):
        """ Converts name+value to stg_params string """
        if value:
            value = re.sub(' ', '\\ ', value)
            return " %s:%s " % (name, value)
        else:
            return ''
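
    # Hypothetical illustration of what _add_param() contributes to stg_params:
    #   _add_param("image_size", "1G")      -> " image_size:1G "
    #   _add_param("image_name", "my disk") -> " image_name:my\ disk "  (spaces escaped)
    #   _add_param("image_boot", "")        -> ""  (empty values contribute nothing)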

    def _do_post_cmd(session):
        cmd = params.get("post_cmd")
        if cmd:
            session.cmd_status_output(cmd)
        session.close()

    error_context.context("Parsing test configuration", logging.info)
    stg_image_num = 0
    stg_params = params.get("stg_params", "")
    # Compatibility
    stg_params += _add_param("image_size", params.get("stg_image_size"))
    stg_params += _add_param("image_format", params.get("stg_image_format"))
    stg_params += _add_param("image_boot", params.get("stg_image_boot", "no"))
    stg_params += _add_param("drive_format", params.get("stg_drive_format"))
    stg_params += _add_param("drive_cache", params.get("stg_drive_cache"))
    if params.get("stg_assign_index") != "no":
        # Assume 0 and 1 are already occupied (hd0 and cdrom)
        stg_params += _add_param("drive_index", 'range(2,n)')
    param_matrix = {}

    stg_params = stg_params.split(' ')
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    rerange = []
    has_name = False
    for i in range(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        if cmd == "image_name":
            has_name = True
        if _RE_RANGE1.match(parm):
            parm = _range(parm)
            if parm is False:
                test.error("Incorrect cfg: stg_params %s looks "
                           "like range(..) but doesn't contain "
                           "numbers." % cmd)
            param_matrix[cmd] = parm
            if type(parm) is str:
                # When we know the stg_image_num, substitute it.
                rerange.append(cmd)
                continue
        else:
            # ',' separated list of values
            parm = parm.split(',')
            j = 0
            while j < len(parm) - 1:
                if parm[j][-1] == '\\':
                    parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
                j += 1
            param_matrix[cmd] = parm
        stg_image_num = max(stg_image_num, len(parm))

    stg_image_num = int(params.get('stg_image_num', stg_image_num))
    for cmd in rerange:
        param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num)
    # param_table* are for pretty print of param_matrix
    param_table = []
    param_table_header = ['name']
    if not has_name:
        param_table_header.append('image_name')
    for _ in param_matrix:
        param_table_header.append(_)

    stg_image_name = params.get('stg_image_name', 'images/%s')
    for i in range(stg_image_num):
        name = "stg%d" % i
        params['images'] += " %s" % name
        param_table.append([])
        param_table[-1].append(name)
        if not has_name:
            params["image_name_%s" % name] = stg_image_name % name
            param_table[-1].append(params.get("image_name_%s" % name))
        for parm in param_matrix.items():
            params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])])
            param_table[-1].append(params.get('%s_%s' % (parm[0], name)))

    if params.get("multi_disk_params_only") == 'yes':
        # Only print the test param_matrix and finish
        logging.info('Newly added disks:\n%s',
                     astring.tabular_output(param_table, param_table_header))
        return

    # Always recreate VMs and disks
    error_context.context("Start the guest with new disks", logging.info)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        env_process.process_images(env_process.preprocess_image, test,
                                   vm_params)

    error_context.context("Start the guest with those disks", logging.info)
    vm = env.get_vm(params["main_vm"])
    login_timeout = int(params.get("login_timeout", 360))
    create_timeout = int(params.get("create_timeout", 1800))
    vm.create(timeout=create_timeout, params=params)
    session = vm.wait_for_login(timeout=login_timeout)

    n_repeat = int(params.get("n_repeat", "1"))
    file_system = [_.strip() for _ in params["file_system"].split()]
    cmd_timeout = float(params.get("cmd_timeout", 360))
    black_list = params["black_list"].split()
    drive_letters = int(params.get("drive_letters", "26"))
    stg_image_size = params["stg_image_size"]
    dd_test = params.get("dd_test", "no")
    pre_command = params.get("pre_command", "")
    labeltype = params.get("labeltype", "gpt")
    iozone_target_num = int(params.get('iozone_target_num', '5'))
    iozone_options = params.get('iozone_options')
    iozone_timeout = float(params.get('iozone_timeout', '7200'))

    have_qtree = True
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        have_qtree = False

    if (params.get("check_guest_proc_scsi") == "yes") and have_qtree:
        error_context.context("Verifying qtree vs. test params")
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info_block())
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(
            session.cmd_output('cat /proc/scsi/scsi'))
        err += tmp1 + tmp2

        if err:
            test.fail("%s errors occurred while verifying qtree vs."
                      " params" % err)
        if params.get('multi_disk_only_qtree') == 'yes':
            return
    try:
        err_msg = "Set disks num: %d" % stg_image_num
        err_msg += ", Get disks num in guest: %d"
        ostype = params["os_type"]
        if ostype == "windows":
            error_context.context("Get windows disk index that to "
                                  "be formatted", logging.info)
            disks = utils_disk.get_windows_disks_index(session, stg_image_size)
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
            if len(disks) > drive_letters:
                black_list.extend(utils_misc.get_winutils_vol(session))
                disks = random.sample(disks, drive_letters - len(black_list))
            error_context.context("Clear readonly for all disks and online "
                                  "them in windows guest.", logging.info)
            if not utils_disk.update_windows_disk_attributes(session, disks):
                test.fail("Failed to update windows disk attributes.")
            dd_test = "no"
        else:
            error_context.context("Get linux disk that to be "
                                  "formatted", logging.info)
            disks = sorted(utils_disk.get_linux_disks(session).keys())
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
    except Exception:
        _do_post_cmd(session)
        raise
    if iozone_options:
        iozone = generate_instance(params, vm, 'iozone')
        random.shuffle(disks)
    try:
        for i in range(n_repeat):
            logging.info("iterations: %s", (i + 1))
            for n, disk in enumerate(disks):
                error_context.context("Format disk in guest: '%s'" % disk,
                                      logging.info)
                # Random select one file system from file_system
                index = random.randint(0, (len(file_system) - 1))
                fstype = file_system[index].strip()
                partitions = utils_disk.configure_empty_disk(
                    session, disk, stg_image_size, ostype,
                    fstype=fstype, labeltype=labeltype)
                if not partitions:
                    test.fail("Fail to format disks.")
                cmd_list = params["cmd_list"]
                for partition in partitions:
                    orig_partition = partition
                    if "/" not in partition:
                        partition += ":"
                    else:
                        partition = partition.split("/")[-1]
                    error_context.context("Copy file into / out of partition:"
                                          " %s..." % partition, logging.info)
                    for cmd_l in cmd_list.split():
                        cmd = params.get(cmd_l)
                        if cmd:
                            session.cmd(cmd % partition, timeout=cmd_timeout)
                    cmd = params["compare_command"]
                    key_word = params["check_result_key_word"]
                    output = session.cmd_output(cmd)
                    if iozone_options and n < iozone_target_num:
                        iozone.run(iozone_options.format(orig_partition), iozone_timeout)
                    if key_word not in output:
                        test.fail("Files on guest os root fs and disk differ")
                    if dd_test != "no":
                        error_context.context("dd test on partition: %s..."
                                              % partition, logging.info)
                        status, output = session.cmd_status_output(
                            dd_test % (partition, partition), timeout=cmd_timeout)
                        if status != 0:
                            test.fail("dd test fail: %s" % output)
                    # When multiple SCSI disks are simulated by scsi_debug,
                    # they can be seen as multiple paths to the same storage
                    # device, so the partition must be unmounted before the
                    # next disk is handled, to avoid corrupting the filesystem
                    # (xfs integrity check errors).
                    if ostype == "linux" and "scsi_debug add_host" in pre_command:
                        status, output = session.cmd_status_output(
                            "umount /dev/%s" % partition, timeout=cmd_timeout)
                        if status != 0:
                            test.fail("Failed to umount partition '%s': %s"
                                      % (partition, output))
            need_reboot = params.get("need_reboot", "no")
            need_shutdown = params.get("need_shutdown", "no")
            if need_reboot == "yes":
                error_context.context("Rebooting guest ...", logging.info)
                session = vm.reboot(session=session, timeout=login_timeout)
            if need_shutdown == "yes":
                error_context.context("Shutting down guest ...", logging.info)
                vm.graceful_shutdown(timeout=login_timeout)
                if vm.is_alive():
                    test.fail("Fail to shut down guest.")
                error_context.context("Start the guest again.", logging.info)
                vm = env.get_vm(params["main_vm"])
                vm.create(timeout=create_timeout, params=params)
                session = vm.wait_for_login(timeout=login_timeout)
            error_context.context("Delete partitions in guest.", logging.info)
            for disk in disks:
                utils_disk.clean_partition(session, disk, ostype)
    finally:
        if iozone_options:
            iozone.clean()
        _do_post_cmd(session)
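The parameter expansion above distributes every stg_params value list over the newly added disks in round-robin order (index modulo the number of values). A minimal, self-contained sketch of that distribution, using made-up parameter names and values, illustrates the per-disk params it produces:

# Sketch of the round-robin expansion; the parameter names and values are
# invented for illustration and are not part of the test above.
param_matrix = {
    "image_size": ["1G", "2G"],
    "drive_format": ["virtio", "scsi-hd", "ide"],
}
stg_image_num = 4
params = {"images": "image1"}

for i in range(stg_image_num):
    name = "stg%d" % i
    params["images"] += " %s" % name
    for key, values in param_matrix.items():
        # Each new disk takes the next value from the list, wrapping around.
        params["%s_%s" % (key, name)] = str(values[i % len(values)])

# stg0 -> 1G/virtio, stg1 -> 2G/scsi-hd, stg2 -> 1G/ide, stg3 -> 2G/virtio
print(params)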
Exemple #34
0
 def ethtool_restore_params(session, status):
     cur_stat = ethtool_get(session)
     if cur_stat != status:
         error_context.context("Restoring ethtool configuration",
                               test.log.info)
         ethtool_set(session, status)
Exemple #35
0
 def ethtool_save_params(session):
     error_context.context("Saving ethtool configuration", test.log.info)
     return ethtool_get(session)
Exemple #36
0
def run(test, params, env):
    """
    Test offload functions of ethernet device using ethtool

    1) Log into a guest.
    2) Saving ethtool configuration.
    3) Enable sub function of NIC.
    4) Execute callback function.
    5) Disable sub function of NIC.
    6) Run callback function again.
    7) Run file transfer test.
       7.1) Creating file in source host.
       7.2) Listening network traffic with tcpdump command.
       7.3) Transfer file.
       7.4) Comparing md5sum of the files on guest and host.
    8) Repeat step 3 - 7.
    9) Restore original configuration.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.

    @todo: Not all guests have ethtool installed, so
        find a way to get it installed using yum/apt-get/
        whatever
    """
    def ethtool_get(session):
        feature_pattern = {
            'tx': 'tx.*checksumming',
            'rx': 'rx.*checksumming',
            'sg': 'scatter.*gather',
            'tso': 'tcp.*segmentation.*offload',
            'gso': 'generic.*segmentation.*offload',
            'gro': 'generic.*receive.*offload',
            'lro': 'large.*receive.*offload',
        }

        o = session.cmd("ethtool -k %s" % ethname)
        status = {}
        for f in feature_pattern.keys():
            try:
                temp = re.findall("%s: (.*)" % feature_pattern.get(f), o)[0]
                if temp.find("[fixed]") != -1:
                    test.log.debug("%s is fixed", f)
                    continue
                status[f] = temp
            except IndexError:
                status[f] = None
                test.log.debug("(%s) failed to get status '%s'", ethname, f)

        test.log.debug("(%s) offload status: '%s'", ethname, str(status))
        return status

    def ethtool_set(session, status):
        """
        Set ethernet device offload status

        :param status: New status will be changed to
        """
        txt = "Set offload status for device "
        txt += "'%s': %s" % (ethname, str(status))
        error_context.context(txt, test.log.info)

        cmd = "ethtool -K %s " % ethname
        cmd += " ".join([o + ' ' + s for o, s in status.items()])
        err_msg = "Failed to set offload status for device '%s'" % ethname
        try:
            session.cmd_output_safe(cmd)
        except aexpect.ShellCmdError as e:
            test.log.error("%s, detail: %s", err_msg, e)
            return False

        curr_status = dict((k, v) for k, v in ethtool_get(session).items()
                           if k in status.keys())
        if curr_status != status:
            test.log.error("%s, got: '%s', expect: '%s'", err_msg,
                           str(curr_status), str(status))
            return False

        return True

    def ethtool_save_params(session):
        error_context.context("Saving ethtool configuration", test.log.info)
        return ethtool_get(session)

    def ethtool_restore_params(session, status):
        cur_stat = ethtool_get(session)
        if cur_stat != status:
            error_context.context("Restoring ethtool configuration",
                                  test.log.info)
            ethtool_set(session, status)

    def compare_md5sum(name):
        txt = "Comparing md5sum of the files on guest and host"
        error_context.context(txt, test.log.info)
        host_result = crypto.hash_file(name, algorithm="md5")
        try:
            o = session.cmd_output("md5sum %s" % name)
            guest_result = re.findall(r"\w+", o)[0]
        except IndexError:
            test.log.error("Could not get file md5sum in guest")
            return False
        test.log.debug("md5sum: guest(%s), host(%s)", guest_result,
                       host_result)
        return guest_result == host_result

    def transfer_file(src):
        """
        Transfer file by scp, use tcpdump to capture packets, then check the
        return string.

        :param src: Source host of transfer file
        :return: Tuple (status, error msg/tcpdump result)
        """
        sess = vm.wait_for_login(timeout=login_timeout)
        session.cmd_output("rm -rf %s" % filename)
        dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" %
                  (filename, params.get("filesize")))
        failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd)
        txt = "Creating file in source host, cmd: %s" % dd_cmd
        error_context.context(txt, test.log.info)
        ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))
        tcpdump_cmd = "tcpdump -lep -i %s -s 0 tcp -vv port ssh" % ethname
        if src == "guest":
            tcpdump_cmd += " and src %s" % guest_ip
            copy_files_func = vm.copy_files_from
            try:
                sess.cmd_output(dd_cmd, timeout=360)
            except aexpect.ShellCmdError as e:
                return failure
        else:
            tcpdump_cmd += " and dst %s" % guest_ip
            copy_files_func = vm.copy_files_to
            try:
                process.system(dd_cmd, shell=True)
            except process.CmdError as e:
                return failure

        # only capture the new tcp port after offload setup
        original_tcp_ports = re.findall(
            r"tcp.*:(\d+).*%s" % guest_ip,
            process.system_output("/bin/netstat -nap").decode())

        for i in original_tcp_ports:
            tcpdump_cmd += " and not port %s" % i

        txt = "Listening traffic using command: %s" % tcpdump_cmd
        error_context.context(txt, test.log.info)
        sess.sendline(tcpdump_cmd)
        if not utils_misc.wait_for(
                lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
            return (False, "Tcpdump process wasn't launched")

        txt = "Transferring file %s from %s" % (filename, src)
        error_context.context(txt, test.log.info)
        try:
            copy_files_func(filename, filename)
        except remote.SCPError as e:
            return (False, "File transfer failed (%s)" % e)

        session.cmd("killall tcpdump")
        try:
            tcpdump_string = sess.read_up_to_prompt(timeout=60)
        except aexpect.ExpectError:
            return (False, "Failed to read tcpdump's output")

        if not compare_md5sum(filename):
            return (False, "Failure, md5sum mismatch")
        return (True, tcpdump_string)

    def tx_callback(status="on"):
        s, o = transfer_file("guest")
        if not s:
            test.log.error(o)
            return False
        return True

    def rx_callback(status="on"):
        s, o = transfer_file("host")
        if not s:
            test.log.error(o)
            return False
        return True

    def so_callback(status="on"):
        s, o = transfer_file("guest")
        if not s:
            test.log.error(o)
            return False
        error_context.context("Check if contained large frame", test.log.info)
        # MTU: default IPv4 MTU is 1500 Bytes, ethernet header is 14 Bytes
        return (status == "on") ^ (len(
            [i for i in re.findall(r"length (\d*):", o) if int(i) > mtu]) == 0)

    def ro_callback(status="on"):
        s, o = transfer_file("host")
        if not s:
            test.log.error(o)
            return False
        return True

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error_context.context("Log into a guest.", test.log.info)
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    # Let's just error the test if we identify that there's no ethtool
    # installed
    error_context.context("Check whether ethtool installed in guest.")
    session.cmd("ethtool -h")
    mtu = 1514
    pretest_status = {}
    filename = "/tmp/ethtool.dd"
    guest_ip = vm.get_address()
    error_context.context("Try to get ethernet device name in guest.")
    ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))

    supported_features = params.get("supported_features")
    if supported_features:
        supported_features = supported_features.split()
    else:
        test.error("No supported features set on the parameters")

    test_matrix = {
        # 'type: (callback,    (dependence), (exclude)'
        "tx": (tx_callback, (), ()),
        "rx": (rx_callback, (), ()),
        "sg": (tx_callback, ("tx", ), ()),
        "tso": (so_callback, (
            "tx",
            "sg",
        ), ("gso", )),
        "gso": (so_callback, (), ("tso", )),
        "gro": (ro_callback, ("rx", ), ("lro", )),
        "lro": (rx_callback, (), ("gro", )),
    }
    pretest_status = ethtool_save_params(session)
    failed_tests = []
    try:
        for f_type in supported_features:
            callback = test_matrix[f_type][0]

            offload_stat = {f_type: "on"}
            offload_stat.update(dict.fromkeys(test_matrix[f_type][1], "on"))
            # lro is fixed for e1000 and e1000e; trying to exclude it by
            # setting "lro off" makes ethtool return an error, so skip it.
            if not (f_type == "gro" and
                    (vm.virtnet[0].nic_model == "e1000e"
                     or vm.virtnet[0].nic_model == "e1000")):
                offload_stat.update(
                    dict.fromkeys(test_matrix[f_type][2], "off"))
            if not ethtool_set(session, offload_stat):
                e_msg = "Failed to set offload status"
                test.log.error(e_msg)
                failed_tests.append(e_msg)

            txt = "Run callback function %s" % callback.__name__
            error_context.context(txt, test.log.info)

            # Some older kernel versions split packets by GSO
            # before tcpdump can capture the big packet, which
            # corrupts our results. Disable check when GSO is
            # enabled.
            if not callback(status="on") and f_type != "gso":
                e_msg = "Callback failed after enabling %s" % f_type
                test.log.error(e_msg)
                failed_tests.append(e_msg)

            if not ethtool_set(session, {f_type: "off"}):
                e_msg = "Failed to disable %s" % f_type
                test.log.error(e_msg)
                failed_tests.append(e_msg)
            txt = "Run callback function %s" % callback.__name__
            error_context.context(txt, test.log.info)
            if not callback(status="off"):
                e_msg = "Callback failed after disabling %s" % f_type
                test.log.error(e_msg)
                failed_tests.append(e_msg)

        if failed_tests:
            test.fail("Failed tests: %s" % failed_tests)

    finally:
        try:
            if session:
                session.close()
        except Exception as detail:
            test.log.error("Fail to close session: '%s'", detail)

        try:
            session = vm.wait_for_serial_login(timeout=login_timeout)
            ethtool_restore_params(session, pretest_status)
        except Exception as detail:
            test.log.warn("Could not restore parameter of"
                          " eth card: '%s'", detail)
Exemple #37
0
    def transfer_file(src):
        """
        Transfer file by scp, use tcpdump to capture packets, then check the
        return string.

        :param src: Source host of transfer file
        :return: Tuple (status, error msg/tcpdump result)
        """
        sess = vm.wait_for_login(timeout=login_timeout)
        session.cmd_output("rm -rf %s" % filename)
        dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" %
                  (filename, params.get("filesize")))
        failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd)
        txt = "Creating file in source host, cmd: %s" % dd_cmd
        error_context.context(txt, test.log.info)
        ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))
        tcpdump_cmd = "tcpdump -lep -i %s -s 0 tcp -vv port ssh" % ethname
        if src == "guest":
            tcpdump_cmd += " and src %s" % guest_ip
            copy_files_func = vm.copy_files_from
            try:
                sess.cmd_output(dd_cmd, timeout=360)
            except aexpect.ShellCmdError as e:
                return failure
        else:
            tcpdump_cmd += " and dst %s" % guest_ip
            copy_files_func = vm.copy_files_to
            try:
                process.system(dd_cmd, shell=True)
            except process.CmdError as e:
                return failure

        # only capture the new tcp port after offload setup
        original_tcp_ports = re.findall(
            r"tcp.*:(\d+).*%s" % guest_ip,
            process.system_output("/bin/netstat -nap").decode())

        for i in original_tcp_ports:
            tcpdump_cmd += " and not port %s" % i

        txt = "Listening traffic using command: %s" % tcpdump_cmd
        error_context.context(txt, test.log.info)
        sess.sendline(tcpdump_cmd)
        if not utils_misc.wait_for(
                lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
            return (False, "Tcpdump process wasn't launched")

        txt = "Transferring file %s from %s" % (filename, src)
        error_context.context(txt, test.log.info)
        try:
            copy_files_func(filename, filename)
        except remote.SCPError as e:
            return (False, "File transfer failed (%s)" % e)

        session.cmd("killall tcpdump")
        try:
            tcpdump_string = sess.read_up_to_prompt(timeout=60)
        except aexpect.ExpectError:
            return (False, "Failed to read tcpdump's output")

        if not compare_md5sum(filename):
            return (False, "Failure, md5sum mismatch")
        return (True, tcpdump_string)
Exemple #38
0
def run(test, params, env):
    """
    Test Step:
        1. Boot up three virtual machines.
        2. Transfer a file from guest1 to guest2 and check its md5sum.
        3. In guest3, try to capture the packets (guest1 <-> guest2).
    Params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """
    def _is_process_finished(session, process_name):
        """
        Check whether the target process is finished running
        :param session: a guest session to send command
        :param process_name: the target process name

        return: True if process does not exists,
                False if still exists
        """
        check_proc_cmd = check_proc_temp % process_name
        status, output = session.cmd_status_output(check_proc_cmd)
        if status:
            return False
        return process_name not in output

    def data_mon(session, cmd, timeout):
        try:
            session.cmd(cmd, timeout)
        except ShellCmdError as e:
            if re.findall(catch_data % (addresses[1], addresses[0]), str(e)):
                test.fail("Captured the transferred data: '%s'" % str(e))
            test.log.info("Guest3 captured data: '%s'", str(e))

    timeout = int(params.get("login_timeout", '360'))
    password = params.get("password")
    username = params.get("username")
    shell_port = params.get("shell_port")
    tmp_dir = params.get("tmp_dir", "/tmp/")
    clean_cmd = params.get("clean_cmd", "rm -f")
    filesize = int(params.get("filesize", '100'))

    wireshark_name = params.get("wireshark_name")
    check_proc_temp = params.get("check_proc_temp")
    tcpdump_cmd = params.get("tcpdump_cmd")
    dd_cmd = params.get("dd_cmd")
    catch_date = params.get("catch_data", "%s.* > %s.ssh")
    md5_check = params.get("md5_check", "md5sum %s")
    mon_process_timeout = int(params.get("mon_process_timeout", "1200"))
    sessions = []
    addresses = []
    vms = []

    error_context.context("Init boot the vms")
    for vm_name in params.get("vms", "vm1 vm2 vm3").split():
        vms.append(env.get_vm(vm_name))
    for vm in vms:
        vm.verify_alive()
        sessions.append(vm.wait_for_login(timeout=timeout))
        addresses.append(vm.get_address())
    mon_session = vms[2].wait_for_login(timeout=timeout)
    mon_macaddr = vms[2].get_mac_address()

    src_file = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8))
    dst_file = (tmp_dir + "dst-%s" % utils_misc.generate_random_string(8))

    try:
        # Before the transfer, run tcpdump to try to catch the data
        error_msg = "In guest3, try to capture the packets (guest1 <-> guest2)"
        error_context.context(error_msg, test.log.info)
        if params.get("os_type") == "linux":
            if_func = utils_net.get_linux_ifname
            args = (mon_session, mon_macaddr)
        else:
            if_func = utils_net.get_windows_nic_attribute
            args = (mon_session, "macaddress", mon_macaddr, "netconnectionid")
            error_context.context("Install wireshark", test.log.info)
            install_wireshark_cmd = params.get("install_wireshark_cmd")
            install_wireshark_cmd = utils_misc.set_winutils_letter(
                sessions[2], install_wireshark_cmd)
            status, output = sessions[2].cmd_status_output(
                install_wireshark_cmd, timeout=timeout)
            if status:
                test.error(
                    "Failed to install wireshark, status=%s, output=%s" %
                    (status, output))
            test.log.info("Wait for wireshark installation to complete")
            utils_misc.wait_for(
                lambda: _is_process_finished(sessions[2], wireshark_name),
                timeout, 20, 3)
            test.log.info("Wireshark is already installed")
        interface_name = if_func(*args)
        tcpdump_cmd = tcpdump_cmd % (addresses[1], addresses[0],
                                     interface_name)
        dthread = utils_misc.InterruptedThread(
            data_mon, (sessions[2], tcpdump_cmd, mon_process_timeout))

        test.log.info("Tcpdump mon start ...")
        test.log.info("Creating %dMB file on guest1", filesize)
        sessions[0].cmd(dd_cmd % (src_file, filesize), timeout=timeout)
        dthread.start()

        error_context.context("Transferring file guest1 -> guest2",
                              test.log.info)
        if params.get("os_type") == "windows":
            cp_cmd = params["copy_cmd"]
            cp_cmd = cp_cmd % (addresses[1], params['file_transfer_port'],
                               src_file, dst_file)
            sessions[0].cmd_output(cp_cmd)
        else:
            remote.scp_between_remotes(addresses[0], addresses[1], shell_port,
                                       password, password, username, username,
                                       src_file, dst_file)

        error_context.context("Check the src and dst file is same",
                              test.log.info)
        src_md5 = sessions[0].cmd_output(md5_check % src_file).split()[0]
        dst_md5 = sessions[1].cmd_output(md5_check % dst_file).split()[0]

        if dst_md5 != src_md5:
            debug_msg = "Files md5sum mismatch!"
            debug_msg += "source file md5 is '%s', after transfer md5 is '%s'"
            test.fail(debug_msg % (src_md5, dst_md5), test.log.info)
        test.log.info("Files md5sum match, file md5 is '%s'", src_md5)

        error_context.context("Checking network private", test.log.info)
        tcpdump_check_cmd = params["tcpdump_check_cmd"]
        tcpdump_kill_cmd = params["tcpdump_kill_cmd"]
        tcpdump_check_cmd = re.sub("ADDR0", addresses[0], tcpdump_check_cmd)
        tcpdump_check_cmd = re.sub("ADDR1", addresses[1], tcpdump_check_cmd)
        status = mon_session.cmd_status(tcpdump_check_cmd)
        if status:
            test.error("Tcpdump process terminate exceptly")
        mon_session.cmd(tcpdump_kill_cmd)
        dthread.join()

    finally:
        sessions[0].cmd(" %s %s " % (clean_cmd, src_file))
        sessions[1].cmd(" %s %s " % (clean_cmd, src_file))
        if mon_session:
            mon_session.close()
        for session in sessions:
            if session:
                session.close()
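The privacy verdict in data_mon() comes down to one regex: if guest3's tcpdump output contains packets between the two transfer guests matching the catch_data pattern, the transfer leaked onto the monitoring guest. A minimal sketch of that match with invented addresses and a fabricated tcpdump excerpt:

import re

# catch_data mirrors the default pattern above; the addresses and the
# captured line are made up for illustration.
catch_data = "%s.* > %s.ssh"
addresses = ["192.168.122.10", "192.168.122.11"]
tcpdump_output = "IP 192.168.122.11.51234 > 192.168.122.10.ssh: Flags [P.]"

pattern = catch_data % (addresses[1], addresses[0])
if re.findall(pattern, tcpdump_output):
    print("leak detected: guest3 captured guest1 <-> guest2 traffic")
else:
    print("no matching packets captured")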
Exemple #39
0
def run(test, params, env):
    """
    Simple test to check if NUMA options are being parsed properly
    1) Boot vm with different numa nodes
    2) With qemu monitor, check if size and cpus for every node match with cli
    3) In guest os, check if size and cpus for every node match with cli

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def numa_info_guest():
        """
        The numa info in guest os, linux only

        return: An array of (ram, cpus) tuples, where ram is the RAM size in
                MB and cpus is a set of CPU numbers
        """

        numa_info_guest = NumaInfo(session=session)

        numa_guest = []
        nodes_guest = numa_info_guest.online_nodes
        for node in nodes_guest:
            node_size = numa_info_guest.online_nodes_meminfo[node]['MemTotal']
            node_size = float(normalize_data_size('%s KB' % node_size))
            node_cpus = numa_info_guest.online_nodes_cpus[node]
            node_cpus = set([int(v) for v in node_cpus.split()])
            numa_guest.append((node_size, node_cpus))

        # Known WONTFIX issue on x86: when the VM has exactly two nodes, the
        # guest OS reports node0 and node1 in the opposite order.
        if (vm_arch in ("x86_64", "i686") and len(numa_guest) == 2):
            numa_guest.reverse()
        return numa_guest

    vm = env.get_vm(params["main_vm"])
    os_type = params["os_type"]
    vm_arch = params["vm_arch_name"]
    session = vm.wait_for_login()

    error_context.context("starting numa_opts test...", logging.info)

    # Get numa info from monitor
    numa_monitor = vm.monitors[0].info_numa()
    error_context.context("numa info in monitor: %r" % numa_monitor,
                          logging.info)
    monitor_expect_nodes = params.get_numeric("monitor_expect_nodes")
    if len(numa_monitor) != monitor_expect_nodes:
        test.fail("[Monitor]Wrong number of numa nodes: %d. Expected: %d" %
                  (len(numa_monitor), monitor_expect_nodes))

    if os_type == 'linux':
        # Get numa info in guest os, only for Linux
        numa_guest = numa_info_guest()
        error_context.context("numa info in guest: %r" % numa_guest,
                              logging.info)
        guest_expect_nodes = int(
            params.get("guest_expect_nodes", monitor_expect_nodes))
        if len(numa_guest) != guest_expect_nodes:
            test.fail("[Guest]Wrong number of numa nodes: %d. Expected: %d" %
                      (len(numa_guest), guest_expect_nodes))
        # Use 30 plus the gap of 'MemTotal' in OS and '-m' in cli as threshold
        MemTotal = get_mem_info(session, 'MemTotal')
        MemTotal = float(normalize_data_size('%s KB' % MemTotal))
        error_context.context("MemTotal in guest os is %s MB" % MemTotal,
                              logging.info)
        threshold = float(params.get_numeric("mem") - MemTotal) + 30
        error_context.context("The acceptable threshold is: %s" % threshold,
                              logging.info)
    else:
        numa_guest = numa_monitor
    session.close()

    for nodenr, node in enumerate(numa_guest):
        mdev = params.get("numa_memdev_node%d" % (nodenr))
        if mdev:
            mdev = mdev.split('-')[1]
            size = float(normalize_data_size(params.get("size_%s" % mdev)))
        else:
            size = params.get_numeric("mem")

        cpus = params.get("numa_cpus_node%d" % (nodenr))
        if cpus is not None:
            cpus = set([int(v) for v in cpus.split(",")])
        else:
            cpus = set([int(v) for v in range(params.get_numeric('smp'))])

        if len(numa_monitor) != 0:
            if size != numa_monitor[nodenr][0]:
                test.fail(
                    "[Monitor]Wrong size of numa node %d: %f. Expected: %f" %
                    (nodenr, numa_monitor[nodenr][0], size))
            if cpus != numa_monitor[nodenr][1]:
                test.fail(
                    "[Monitor]Wrong CPU set on numa node %d: %s. Expected: %s"
                    % (nodenr, numa_monitor[nodenr][1], cpus))

        if os_type == 'linux':
            if size - numa_guest[nodenr][0] > threshold:
                test.fail(
                    "[Guest]Wrong size of numa node %d: %f. Expected: %f" %
                    (nodenr, numa_guest[nodenr][0], size))
            if cpus != numa_guest[nodenr][1]:
                test.fail(
                    "[Guest]Wrong CPU set on numa node %d: %s. Expected: %s" %
                    (nodenr, numa_guest[nodenr][1], cpus))
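The guest-side node size comparison tolerates the usual gap between the '-m' value and the MemTotal the guest reports, plus a fixed 30 MB of slack. A short worked sketch of that threshold with illustrative figures:

# Worked example of the node-size threshold; all numbers are made up.
mem_cli = 4096.0            # '-m 4096' on the qemu command line, in MB
mem_total_guest = 4043.5    # MemTotal reported inside the guest, in MB
threshold = (mem_cli - mem_total_guest) + 30   # 82.5 MB of allowed shortfall

expected_node_size = 2048.0   # node size from the -numa/memdev options, in MB
guest_node_size = 1998.2      # size reported for that node inside the guest

# The node passes as long as its shortfall stays within the threshold.
assert expected_node_size - guest_node_size <= threshold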
Exemple #40
0
def run(test, params, env):
    """
    Test hotplug vcpu devices and execute stress test.

    1) Boot up guest without vcpu device.
    2) Hotplug vcpu devices and check successfully or not. (qemu side)
    3) Check if the number of CPUs in guest changes accordingly. (guest side)
    4) Execute stress test on all hotplugged vcpu devices
    5) Hotunplug vcpu devices during stress test
    6) Recheck the number of CPUs in guest.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    def heavyload_install():
        if session.cmd_status(test_installed_cmd) != 0:
            logging.warning("Could not find installed heavyload in guest, will"
                            " install it via winutils.iso ")
            winutil_drive = utils_misc.get_winutils_vol(session)
            if not winutil_drive:
                test.cancel("WIN_UTILS CDROM not found.")
            install_cmd = params["install_cmd"] % winutil_drive
            session.cmd(install_cmd)

    os_type = params["os_type"]
    login_timeout = params.get_numeric("login_timeout", 360)
    stress_duration = params.get_numeric("stress_duration", 180)
    verify_wait_timeout = params.get_numeric("verify_wait_timeout", 60)
    vcpu_devices = params.objects("vcpu_devices")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    smp = vm.cpuinfo.smp
    maxcpus = vm.cpuinfo.maxcpus

    guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, os_type)
    for vcpu_dev in vcpu_devices:
        error_context.context("Hotplug vcpu device: %s" % vcpu_dev,
                              logging.info)
        vm.hotplug_vcpu_device(vcpu_dev)
    if not utils_misc.wait_for(lambda: vm.get_cpu_count() == maxcpus,
                               verify_wait_timeout):
        test.fail("Actual number of guest CPUs is not equal to expected")

    if os_type == "linux":
        stress_args = params["stress_args"]
        stress_tool = cpu_utils.VMStressBinding(vm,
                                                params,
                                                stress_args=stress_args)
        current_guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, os_type)
        plugged_cpu_ids = list(current_guest_cpu_ids - guest_cpu_ids)
        plugged_cpu_ids.sort()
        for cpu_id in plugged_cpu_ids:
            error_context.context(
                "Run stress on vCPU(%d) inside guest." % cpu_id, logging.info)
            stress_tool.load_stress_tool(cpu_id)
        error_context.context(
            "Successfully launched stress sessions, execute "
            "stress test for %d seconds" % stress_duration, logging.info)
        time.sleep(stress_duration)
        if utils_package.package_install("sysstat", session):
            error_context.context("Check usage of guest CPUs", logging.info)
            mpstat_cmd = "mpstat 1 5 -P %s | cat" % ",".join(
                map(str, plugged_cpu_ids))
            mpstat_out = session.cmd_output(mpstat_cmd)
            cpu_stat = dict(
                re.findall(r"Average:\s+(\d+)\s+(\d+\.\d+)", mpstat_out, re.M))
            for cpu_id in plugged_cpu_ids:
                cpu_usage_rate = float(cpu_stat[str(cpu_id)])
                if cpu_usage_rate < 50:
                    test.error("Stress test on vCPU(%s) failed, usage rate: "
                               "%.2f%%" % (cpu_id, cpu_usage_rate))
                logging.info("Usage rate of vCPU(%s) is: %.2f%%", cpu_id,
                             cpu_usage_rate)
        for vcpu_dev in vcpu_devices:
            error_context.context("Hotunplug vcpu device: %s" % vcpu_dev,
                                  logging.info)
            vm.hotunplug_vcpu_device(vcpu_dev)
            # Drift the running stress task to other vCPUs
            time.sleep(random.randint(5, 10))
        if vm.get_cpu_count() != smp:
            test.fail("Actual number of guest CPUs is not equal to expected")
        stress_tool.unload_stress()
        stress_tool.clean()
    else:
        install_path = params["install_path"]
        test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path
        heavyload_install()
        error_context.context("Run heavyload inside guest.", logging.info)
        heavyload_bin = r'"%s\heavyload.exe" ' % install_path
        heavyload_options = [
            "/CPU %d" % maxcpus,
            "/DURATION %d" % (stress_duration // 60), "/AUTOEXIT", "/START"
        ]
        start_cmd = heavyload_bin + " ".join(heavyload_options)
        stress_tool = BackgroundTest(
            session.cmd, (start_cmd, stress_duration, stress_duration))
        stress_tool.start()
        if not utils_misc.wait_for(
                stress_tool.is_alive, verify_wait_timeout, first=5):
            test.error("Failed to start heavyload process.")
        stress_tool.join(stress_duration)

    session.close()
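The Linux branch above decides whether a hotplugged vCPU is really loaded by reading the 'Average:' rows of mpstat; the first float after the CPU number is %usr, which the test treats as the usage rate. A small sketch of that parsing over a fabricated mpstat excerpt:

import re

# The mpstat excerpt below is invented; the regex matches the one used above.
mpstat_out = """
Average:     CPU    %usr   %nice    %sys %iowait    %irq   %soft   %idle
Average:       2   97.40    0.00    1.20    0.00    0.00    0.20    1.20
Average:       3   96.80    0.00    1.60    0.00    0.00    0.40    1.20
"""

cpu_stat = dict(re.findall(r"Average:\s+(\d+)\s+(\d+\.\d+)", mpstat_out, re.M))
for cpu_id in (2, 3):
    usage = float(cpu_stat[str(cpu_id)])
    assert usage >= 50, "vCPU %d not loaded enough: %.2f%%" % (cpu_id, usage)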
Exemple #41
0
def run(test, params, env):
    """
    Vsock basic function test

    1. Boot guest with vhost-vsock-pci device
    2. Download and compile nc-vsock on both guest and host if needed
    3. Start listening inside guest
    4. Connect guest CID from host
    5. Input character, e.g. 'Hello world'
    6. Check if guest receive the content correctly

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def clean(tmp_file):
        """ Clean the environment """
        cmd_rm = "rm -rf %s" % tmp_file
        if vsock_test_tool == "nc_vsock":
            cmd_rm += "; rm -rf %s*" % tool_bin
        session.cmd_output_safe(cmd_rm)
        process.system(cmd_rm, shell=True, ignore_status=True)
        if host_vsock_session.is_alive():
            host_vsock_session.close()
        session.close()

    vm = env.get_vm(params["main_vm"])
    tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6)
    session = vm.wait_for_login()
    vsock_dev = params["vsocks"].split()[0]
    guest_cid = vm.devices.get(vsock_dev).get_param("guest-cid")
    port = random.randrange(1, 6000)
    vsock_test_tool = params["vsock_test_tool"]

    if vsock_test_tool == "ncat":
        tool_bin = path.find_command("ncat")
        vsock_listen(tool_bin, port, session)
        host_vsock_session = vsock_connect(tool_bin, guest_cid, port)

    if vsock_test_tool == "nc_vsock":
        tool_bin = compile_nc_vsock(test, vm, session)
        vsock_listen(tool_bin, port, session)
        host_vsock_session = vsock_connect(tool_bin, guest_cid, port)
        connected_str = r"Connection from cid*"
        check_received_data(test, session, connected_str)

    send_data = "Hello world"
    error_context.context('Input "Hello world" to vsock.', logging.info)
    host_vsock_session.sendline(send_data)
    check_received_data(test, session, send_data)
    host_vsock_session.close()
    check_guest_vsock_conn_exit(test, session, close_session=True)

    # Transfer data from guest to host
    session = vm.wait_for_login()
    rec_session = send_data_from_guest_to_host(session, tool_bin, guest_cid,
                                               tmp_file)
    utils_misc.wait_for(lambda: not rec_session.is_alive(), timeout=20)
    check_guest_vsock_conn_exit(test, session)
    cmd_chksum = 'md5sum %s' % tmp_file
    md5_origin = session.cmd_output(cmd_chksum).split()[0]
    md5_received = process.system_output(cmd_chksum).split()[0].decode()
    if md5_received != md5_origin:
        clean(tmp_file)
        test.fail('Data transfer not integrated, the original md5 value'
                  ' is %s, while the md5 value received on host is %s' %
                  (md5_origin, md5_received))
    clean(tmp_file)
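The nc-vsock/ncat plumbing above boils down to an AF_VSOCK stream connection from the host to the guest CID. A hedged host-side sketch with Python's socket module (requires a Linux host with AF_VSOCK support; the CID and port below are illustrative, and a listener must already be running in the guest):

import socket

guest_cid = 3     # guest-cid from the vhost-vsock-pci device (illustrative)
port = 5000       # whatever port the guest-side listener picked (illustrative)

# Connect to the guest listener over vsock and push some data, mirroring
# what the ncat/nc-vsock host session does in the test above.
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock:
    sock.connect((guest_cid, port))
    sock.sendall(b"Hello world\n")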
Exemple #42
0
def run(test, params, env):
    """
    KVM block resize test:

    1) Start guest with both data disk and system disk.
    2) Extend/shrink data disk in guest.
    3) Verify the data disk size match expected size.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_disk_size(session, os_type, disk):
        """
        Verify the current block size match with the expected size.
        """
        current_size = utils_disk.get_disk_size(session, os_type, disk)
        accept_ratio = float(params.get("accept_ratio", 0))
        if (current_size <= block_size
                and current_size >= block_size * (1 - accept_ratio)):
            logging.info(
                "Block Resizing Finished !!! \n"
                "Current size %s is same as the expected %s", current_size,
                block_size)
            return True
        else:
            logging.error("Current: %s\nExpect: %s\n", current_size,
                          block_size)
            return False

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    os_type = params["os_type"]
    img_size = params.get("image_size_stg", "10G")
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    img = qemu_storage.QemuImg(data_image_params, data_dir.get_data_dir(),
                               data_image)
    filters = {}
    data_image_dev = ""
    if vm.check_capability(Flags.BLOCKDEV):
        filters = {"driver": data_image_params.get("image_format", "qcow2")}
    else:
        filters = {"file": img.image_filename}

    # get format node-name(-blockdev) or device name(-drive)
    for dev in vm.devices.get_by_params(filters):
        if dev.aobject == data_image:
            data_image_dev = dev.get_qid()

    if not data_image_dev:
        test.error("Cannot find device to resize.")

    block_virtual_size = json.loads(img.info(force_share=True,
                                             output="json"))["virtual-size"]

    session = vm.wait_for_login(timeout=timeout)
    disk = sorted(utils_disk.get_linux_disks(session).keys())[0]

    for ratio in params.objects("disk_change_ratio"):
        block_size = int(int(block_virtual_size) * float(ratio))
        error_context.context("Change disk size to %s in monitor" % block_size,
                              logging.info)

        if vm.check_capability(Flags.BLOCKDEV):
            args = (None, block_size, data_image_dev)
        else:
            args = (data_image_dev, block_size)
        vm.monitor.block_resize(*args)

        # to apply the new size
        if params.get("guest_prepare_cmd", ""):
            session.cmd(params.get("guest_prepare_cmd"))
        if params.get("need_reboot") == "yes":
            session = vm.reboot(session=session)

        if not wait.wait_for(lambda: verify_disk_size(session, os_type, disk),
                             20, 0, 1, "Block Resizing"):
            test.fail("The current block size is not the same as expected.\n")

    session.close()
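verify_disk_size() accepts any size between block_size * (1 - accept_ratio) and block_size, because the capacity the guest reports is usually a little below the value passed to block_resize. A short worked sketch of that window with illustrative numbers:

# Worked example of the accept_ratio window; the figures are invented.
block_size = 12 * 1024 ** 3      # 12G requested via block_resize, in bytes
accept_ratio = 0.005             # 0.5% tolerance from the test configuration
current_size = 12871230423       # size reported inside the guest, in bytes

lower_bound = block_size * (1 - accept_ratio)
assert lower_bound <= current_size <= block_size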
Exemple #43
0
def run(test, params, env):
    """
    Qemu balloon device stress test:
    1) boot guest with balloon device
    2) enable driver verifier in guest (Windows only)
    3) run stress in background repeatedly
    4) balloon memory in monitor in loop

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_bg_running():
        """
        Check the background test status in guest.
        :return: return True if find the process name; otherwise False
        """
        if params['os_type'] == 'windows':
            list_cmd = params.get("list_cmd", "wmic process get name")
            output = session.cmd_output_safe(list_cmd, timeout=60)
            process = re.findall("mplayer", output, re.M | re.I)
            return bool(process)
        else:
            return stress_bg.app_running()

    error_context.context("Boot guest with balloon device", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = float(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    if params['os_type'] == 'windows':
        driver_name = params["driver_name"]
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
        balloon_test = BallooningTestWin(test, params, env)
    else:
        balloon_test = BallooningTestLinux(test, params, env)

    error_context.context("Run stress background", logging.info)
    stress_test = params.get("stress_test")
    if params['os_type'] == 'windows':
        utils_test.run_virt_sub_test(test, params, env, stress_test)
        if not utils_misc.wait_for(check_bg_running,
                                   first=2.0,
                                   text="wait for stress app to start",
                                   step=1.0,
                                   timeout=60):
            test.error("Run stress background failed")
    else:
        stress_bg = utils_test.VMStress(vm, "stress", params)
        stress_bg.load_stress_tool()

    repeat_times = int(params.get("repeat_times", 1000))
    min_sz, max_sz = balloon_test.get_memory_boundary()

    error_context.context("balloon vm memory in loop", logging.info)
    try:
        for i in range(1, int(repeat_times + 1)):
            logging.info("repeat times: %d", i)
            balloon_test.balloon_memory(int(random.uniform(min_sz, max_sz)))
            if not check_bg_running():
                test.error("Background stress process is not alive")
    finally:
        if session:
            session.close()
Exemple #44
0
def run(test, params, env):
    """
    Emulate poweroff under an IO workload (dd so far) by killing the VM with SIGKILL.

    1) Boot a VM
    2) Add IO workload for guest OS
    3) Sleep for a random time
    4) Kill the VM
    5) Check the image to verify if errors are found except some cluster leaks

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    session2 = vm.wait_for_login(timeout=login_timeout)

    bg_cmd = params.get("background_cmd")
    error_context.context("Add IO workload for guest OS.", logging.info)
    session.cmd_output(bg_cmd, timeout=60)

    error_context.context("Verify the background process is running")
    check_cmd = params.get("check_cmd")
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Sleep for a random time", logging.info)
    time.sleep(random.randrange(30, 100))
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Kill the VM", logging.info)
    vm.process.close()

    error_context.context("Check img after kill VM", logging.info)
    base_dir = data_dir.get_data_dir()
    image_name = params.get("image_name")
    image = qemu_storage.QemuImg(params, base_dir, image_name)
    try:
        image.check_image(params, base_dir)
    except Exception as e:
        if "Leaked clusters" not in e.message:
            raise
        error_context.context("Detected cluster leaks, try to repair it",
                              logging.info)
        restore_cmd = params.get("image_restore_cmd") % image.image_filename
        cmd_status = process.system(restore_cmd, shell=True)
        if cmd_status:
            test.fail("Failed to repair cluster leaks on the image")
Exemple #45
0
 def check_irqbalance_status():
     """ Check the status of irqbalance service. """
     error_context.context("Check irqbalance service status.", logging.info)
     return re.findall("Active: active", session.cmd_output(status_cmd))
Exemple #46
0
def run(test, params, env):
    """
    [Mlock] Basic test, this case will:
    1) Get nr_mlock and nr_unevictable in host before VM start.
    2) Start the VM.
    3) Get nr_mlock and nr_unevictable in host after VM start.
    4) Check nr_mlock and nr_unevictable with VM memory.
    5) Check kernel crash

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_mlock_unevictable(mlock_cmd, unevictable_cmd):
        """
        Get nr_mlock and nr_unevictable in host

        :param mlock_cmd: CMD to get nr_mlock
        :param unevictable_cmd: CMD to get nr_unevictable
        """
        mlock = int(process.system_output(mlock_cmd).split().pop())
        unevictable = int(process.system_output(unevictable_cmd).split().pop())
        return mlock, unevictable

    mlock_cmd = params["mlock_cmd"]
    unevictable_cmd = params["unevictable_cmd"]
    vm_mem = int(params["mem"])

    error_context.context(
        "Get nr_mlock and nr_unevictable in host before VM start!",
        logging.info)
    mlock_pre, unevictable_pre = get_mlock_unevictable(mlock_cmd,
                                                       unevictable_cmd)
    logging.info("mlock_pre is %d and unevictable_pre is %d.", mlock_pre,
                 unevictable_pre)
    params["start_vm"] = "yes"

    error_context.context("Starting VM!", logging.info)
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context(
        "Get nr_mlock and nr_unevictable in host after VM start!",
        logging.info)
    mlock_post, unevictable_post = get_mlock_unevictable(
        mlock_cmd, unevictable_cmd)
    logging.info("mlock_post is %d and unevictable_post is %d.", mlock_post,
                 unevictable_post)

    realtime_mlock = params["realtime_mlock"]
    if realtime_mlock == "on":
        nr_mlock = mlock_post - mlock_pre
        vm_pages = vm_mem * 1024 * 1024 / getpagesize()
        if nr_mlock < vm_pages:
            test.fail(
                "nr_mlock does not match the VM memory when mlock is %s! "
                "nr_mlock = %d, vm_mem = %d."
                % (realtime_mlock, nr_mlock, vm_mem))
        nr_unevictable = unevictable_post - unevictable_pre
        if nr_unevictable < vm_pages:
            test.fail(
                "nr_unevictable does not match the VM memory when mlock is %s! "
                "nr_unevictable = %d, vm_mem = %d."
                % (realtime_mlock, nr_unevictable, vm_mem))
    else:
        if mlock_post != mlock_pre:
            test.fail(
                "mlock_post is not equal to mlock_pre when mlock is %s!" %
                realtime_mlock)
        if unevictable_post != unevictable_pre:
            test.fail(
                "unevictable_post is not equal to unevictable_pre when mlock is %s!"
                % realtime_mlock)

    error_context.context("Check kernel crash message!", logging.info)
    vm.verify_kernel_crash()
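The pass criterion converts the VM memory from MB into pages, because nr_mlock and nr_unevictable are page counters. A worked sketch of that conversion (the VM size and counter delta are illustrative; the page size is read from the running host):

import resource

vm_mem = 4096                                   # '-m 4096', in MB (illustrative)
page_size = resource.getpagesize()              # typically 4096 bytes
vm_pages = vm_mem * 1024 * 1024 // page_size    # 1048576 pages for 4G / 4 KiB

nr_mlock_delta = 1048600    # nr_mlock after VM start minus before (illustrative)
assert nr_mlock_delta >= vm_pages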
Exemple #47
0
 def check_interrupts():
     """ Check the interrupt queues in guest. """
     error_context.context("Check the interrupt queues in guest.",
                           logging.info)
     return session.cmd_output(irq_check_cmd)
Exemple #48
0
def run(test, params, env):
    """
    Time clock offset check when guest crash/bsod test:

    1) boot guest with '-rtc base=utc,clock=host,driftfix=slew';
    2) sync host system time with ntp server;
    3) inject nmi to guest/ make linux kernel crash;
    4) sleep long time, then reset vm via system_reset;
    5) query clock offset from ntp server;

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    ntp_cmd = params["ntp_cmd"]
    ntp_query_cmd = params["ntp_query_cmd"]
    nmi_cmd = params.get("nmi_cmd", "inject-nmi")
    sleep_time = float(params.get("sleep_time", 1800))
    deviation = float(params.get("deviation", 5))
    os_type = params["os_type"]
    ntp_host_cmd = params.get("ntp_host_cmd", ntp_cmd)

    error_context.context("sync host time with ntp server", logging.info)
    process.system(ntp_host_cmd, shell=True)

    error_context.context("start guest", logging.info)
    params["start_vm"] = "yes"
    preprocess(test, params, env)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error_context.context("sync time in guest", logging.info)
    if os_type == "windows":
        utils_test.start_windows_service(session, "w32time")
    session.cmd(ntp_cmd)

    error_context.context("inject nmi interrupt in vm", logging.info)
    target, cmd = re.split(r"\s*:\s*", nmi_cmd)
    if target == "monitor":
        vm.monitor.send_args_cmd(cmd)
    else:
        session.sendline(cmd)
    try:
        session.cmd("dir")
    except Exception:
        pass
    else:
        test.fail("Guest OS still alive ...")

    error_context.context("sleep %s seconds" % sleep_time, logging.info)
    time.sleep(sleep_time)
    # Autotest parses the serial output and could raise VMDeadKernelCrash for
    # the crash we intentionally generated via sysrq, so ignore one "BUG:" line.
    vm.resume()
    try:
        session = vm.reboot(method="system_reset")
    except VMDeadKernelCrashError as details:
        details = str(details)
        if (re.findall(r"Trigger a crash\s.*BUG:", details, re.M)
                and details.count("BUG:") != 1):
            test.fail("Got multiple kernel crashes. Please "
                      "note that one of them was "
                      "intentionally  generated by sysrq in "
                      "this test.\n%s" % details)
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                session = vm.wait_for_login(timeout=timeout)
            except VMDeadKernelCrashError as details:
                details = str(details)
                if (re.findall(r"Trigger a crash\s.*BUG:", details, re.M)
                        and details.count("BUG:") != 1):
                    test.fail("Got multiple kernel crashes. "
                              "Please note that one of them was "
                              "intentionally  generated by sysrq "
                              "in this test.\n%s" % details)
            else:
                break

    error_context.context("check time offset via ntp", logging.info)
    output = session.cmd_output(ntp_query_cmd)
    try:
        offset = re.findall(r"[+-]?(\d+\.\d+)", output, re.M)[-1]
    except IndexError:
        offset = 0.0
    if float(offset) > deviation:
        test.fail("Unacceptable offset '%s', " % offset +
                  "deviation '%s'" % deviation)
Exemple #49
0
def run(test, params, env):
    """
    Qemu allocates hugepages from a specified node.
    Steps:
    1) Set up a total of 4G of hugepages on the specified node.
    2) Set up a total of 1G of hugepages on each idle node.
    3) Mount the hugepages to /mnt/kvm_hugepage.
    4) Boot a guest that only allocates hugepages from the specified node.
    5) Check the hugepages used on every node.
    :params test: QEMU test object.
    :params params: Dictionary with the test parameters.
    :params env: Dictionary with test environment.
    """
    memory.drop_caches()
    hugepage_size = memory.get_huge_page_size()
    mem_size = int(normalize_data_size("%sM" % params["mem"], "K"))
    idle_node_mem = int(
        normalize_data_size("%sM" % params["idle_node_mem"], "K"))

    error_context.context("Get host numa topological structure.", logging.info)
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.get_online_nodes_withmem()
    idle_node_list = node_list.copy()
    node_meminfo = host_numa_node.get_all_node_meminfo()

    for node_id in list(node_list):
        error_context.base_context(
            "Check free memory on host "
            "numa node %s before setup." % node_id, logging.info)
        node_memfree = int(node_meminfo[node_id]["MemFree"])
        if node_memfree < idle_node_mem:
            idle_node_list.remove(node_id)
        if node_memfree < mem_size:
            node_list.remove(node_id)

    if len(idle_node_list) < 2 or not node_list:
        test.cancel("Host does not have enough NUMA nodes with free memory "
                    "to run the test, skipping...")

    for node_id in node_list:
        error_context.base_context(
            "Restrict the qemu process to allocate "
            "HugePages only from node%s." % node_id, logging.info)
        params["target_nodes"] = "%s" % node_id
        params["target_num_node%s" % node_id] = math.ceil(mem_size /
                                                          hugepage_size)
        error_context.context("Setup huge pages for specify node%s." % node_id,
                              logging.info)
        check_list = [_ for _ in idle_node_list if _ != node_id]
        for idle_node in check_list:
            params["target_nodes"] += " %s" % idle_node
            params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem /
                                                                hugepage_size)
            error_context.context(
                "Setup huge pages for idle node%s." % idle_node, logging.info)
        params["setup_hugepages"] = "yes"
        hp_config = test_setup.HugePageConfig(params)
        hp_config.setup()
        params["qemu_command_prefix"] = "numactl --membind=%s" % node_id
        params["start_vm"] = "yes"
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        try:
            vm = env.get_vm(params["main_vm"])
            vm.verify_alive()
            vm.wait_for_login()

            meminfo = host_numa_node.get_all_node_meminfo()
            for index in check_list:
                error_context.base_context(
                    "Check process HugePages Free on host "
                    "numa node %s." % index, logging.info)
                hugepages_free = int(meminfo[index]["HugePages_Free"])
                if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free:
                    test.fail("Qemu still use HugePages from other node."
                              "Expect: node%s, used: node%s." %
                              (node_id, index))
        finally:
            vm.destroy()
            hp_config.cleanup()
Exemple #50
0
def run(test, params, env):
    """
    Qemu multiqueue test for virtio-scsi controller:

    1) Boot up a guest with virtio-scsi device which support multi-queue and
       the vcpu and images number of guest should match the multi-queue number.
    2) Pin the vcpus to the host cpus.
    3) Check the multi queue option from monitor.
    4) Check device init status in guest
    5) Pin the interrupts to the vcpus.
    6) Load I/O in all targets.
    7) Check the interrupt queues in guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_mapping_interrupts2vcpus(irqs, pattern):
        """ Get the mapping of between virtio interrupts and vcpus. """
        regex = r'(\d+):(\s+(\d+\s+){%d})\s+.+\s%s\s' % (len(
            re.findall(r"\s+CPU\d+", irqs, re.M)), pattern)
        return {
            f[0]: {
                'count': f[1].split()
            }
            for f in re.findall(regex, irqs, re.M)
        }

    def create_data_images():
        """ Create date image objects. """
        for extra_image in range(images_num):
            image_tag = "stg%s" % extra_image
            params["images"] += " %s" % image_tag
            params["image_name_%s" % image_tag] = "images/%s" % image_tag
            params["image_size_%s" % image_tag] = extra_image_size
            params["force_create_image_%s" % image_tag] = "yes"
            image_params = params.object_params(image_tag)
            env_process.preprocess_image(test, image_params, image_tag)

    def check_irqbalance_status():
        """ Check the status of irqbalance service. """
        error_context.context("Check irqbalance service status.", logging.info)
        return re.findall("Active: active", session.cmd_output(status_cmd))

    def start_irqbalance_service():
        """ Start the irqbalance service. """
        error_context.context("Start the irqbalance service.", logging.info)
        session.cmd("systemctl start irqbalance")
        output = utils_misc.strip_console_codes(session.cmd_output(status_cmd))
        if not re.findall("Active: active", output):
            test.cancel(
                "Cannot start irqbalance inside guest. Skipping this test.")

    def pin_vcpus2host_cpus():
        """ Pint the vcpus to the host cpus. """
        error_context.context("Pin vcpus to host cpus.", logging.info)
        host_numa_nodes = utils_misc.NumaInfo()
        vcpu_num = 0
        for numa_node_id in host_numa_nodes.nodes:
            numa_node = host_numa_nodes.nodes[numa_node_id]
            for _ in range(len(numa_node.cpus)):
                if vcpu_num >= len(vm.vcpu_threads):
                    break
                vcpu_tid = vm.vcpu_threads[vcpu_num]
                logging.debug("pin vcpu thread(%s) to cpu(%s)" %
                              (vcpu_tid, numa_node.pin_cpu(vcpu_tid)))
                vcpu_num += 1

    def verify_num_queues():
        """ Verify the number of queues. """
        error_context.context("Verify num_queues from monitor.", logging.info)
        qtree = qemu_qtree.QtreeContainer()
        try:
            qtree.parse_info_qtree(vm.monitor.info('qtree'))
        except AttributeError:
            test.cancel("Monitor deson't supoort qtree skip this test")
        error_msg = "Number of queues mismatch: expect %s report from monitor: %s(%s)"
        scsi_bus_addr = ""
        qtree_num_queues_full = ""
        qtree_num_queues = ""
        for node in qtree.get_nodes():
            node_type = node.qtree['type']
            if isinstance(node, qemu_qtree.QtreeDev) and (
                    node_type == "virtio-scsi-device"):
                qtree_num_queues_full = node.qtree["num_queues"]
                qtree_num_queues = re.search("[0-9]+",
                                             qtree_num_queues_full).group()
            elif isinstance(node, qemu_qtree.QtreeDev) and (
                    node_type == "virtio-scsi-pci"):
                scsi_bus_addr = node.qtree['addr']

        if qtree_num_queues != num_queues:
            error_msg = error_msg % (num_queues, qtree_num_queues,
                                     qtree_num_queues_full)
            test.fail(error_msg)
        if not scsi_bus_addr:
            test.error("Didn't find addr from qtree. Please check the log.")

    def check_interrupts():
        """ Check the interrupt queues in guest. """
        error_context.context("Check the interrupt queues in guest.",
                              logging.info)
        return session.cmd_output(irq_check_cmd)

    def check_interrupts2vcpus(irq_map):
        """ Check the status of interrupters to vcpus. """
        error_context.context("Check the status of interrupters to vcpus.",
                              logging.info)
        cpu_selects = {}
        cpu_select = 1
        for _ in range(int(num_queues)):
            val = ','.join([
                _[::-1] for _ in re.findall(r'\w{8}|\w+',
                                            format(cpu_select, 'x')[::-1])
            ][::-1])
            cpu_selects[val] = format(cpu_select, 'b').count('0')
            cpu_select = cpu_select << 1
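        # For example, with num_queues=4 the loop above builds
        # cpu_selects = {'1': 0, '2': 1, '4': 2, '8': 3}, i.e. the hex
        # smp_affinity masks that each pin one queue to one vcpu.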
        irqs_id_reset = []
        for irq_id in irq_map.keys():
            cmd = 'cat /proc/irq/%s/smp_affinity' % irq_id
            cpu_selected = re.sub(r'(^[0+,?0+]+)|(,)', '',
                                  session.cmd_output(cmd)).strip()
            if cpu_selected not in cpu_selects:
                irqs_id_reset.append(irq_id)
            else:
                cpu_irq_map[irq_id] = cpu_selects[cpu_selected]
                del cpu_selects[cpu_selected]
        return irqs_id_reset, cpu_selects

    def pin_interrupts2vcpus(irqs_id_reset, cpu_selects):
        """ Pint the interrupts to vcpus. """
        bind_cpu_cmd = []
        for irq_id, cpu_select in zip(irqs_id_reset, cpu_selects):
            bind_cpu_cmd.append("echo %s > /proc/irq/%s/smp_affinity" %
                                (cpu_select, irq_id))
            cpu_irq_map[irq_id] = cpu_selects[cpu_select]
        if bind_cpu_cmd:
            error_context.context("Pin interrupters to vcpus", logging.info)
            session.cmd(' && '.join(bind_cpu_cmd))
        return cpu_irq_map

    def _get_data_disks(session):
        """ Get the data disks. """
        output = session.cmd_output(
            params.get("get_dev_cmd", "ls /dev/[svh]d*"))
        system_dev = re.search(r"/dev/([svh]d\w+)(?=\d+)", output,
                               re.M).group(1)
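        # Illustration: an output of "/dev/sda /dev/sda1 /dev/sdb /dev/sdc"
        # gives system_dev 'sda', so only the data disks /dev/sdb and
        # /dev/sdc are returned below.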
        return (dev for dev in output.split() if system_dev not in dev)

    def check_io_status(timeout):
        """ Check the status of I/O. """
        chk_session = vm.wait_for_login(timeout=360)
        while int(chk_session.cmd_output("pgrep -lx dd | wc -l", timeout)):
            time.sleep(5)
        chk_session.close()

    def load_io_data_disks():
        """ Load I/O on data disks. """
        error_context.context("Load I/O in all targets", logging.info)
        dd_session = vm.wait_for_login(timeout=360)
        dd_timeout = int(re.findall(r"\d+", extra_image_size)[0])
        cmd = "dd of=%s if=/dev/urandom bs=1M count=%s oflag=direct &"
        cmds = [cmd % (dev, dd_timeout) for dev in _get_data_disks(dd_session)]
        if len(cmds) != images_num:
            test.error("Disks are not all show up in system, only %s disks." %
                       len(cmds))

        # As Bug 1177332 exists, mq is not supported completely.
        # So we don't consider performance currently; dd_timeout is longer.
        dd_session.cmd(' '.join(cmds), dd_timeout * images_num * 2)
        check_io_status(dd_timeout)
        dd_session.close()

    def compare_interrupts(prev_irqs, cur_irqs):
        """ Compare the interrupts between after and before IO. """
        cpu_not_used = []
        diff_interrupts = {}
        for irq in prev_irqs.keys():
            cpu = int(cpu_irq_map[irq])
            diff_val = int(cur_irqs[irq]['count'][cpu]) - int(
                prev_irqs[irq]['count'][cpu])
            if diff_val == 0:
                cpu_not_used.append('CPU%s' % cpu)
            else:
                diff_interrupts[cpu] = diff_val
        logging.debug('The changed number of interrupts:')
        for k, v in sorted(diff_interrupts.items()):
            logging.debug('  CPU%s: %d' % (k, v))
        if cpu_not_used:
            cpus = " ".join(cpu_not_used)
            error_msg = ("%s are not used during test. "
                         "Please check debug log for more information.")
            test.fail(error_msg % cpus)

    def wmi_facility_test(session):
        driver_name = params["driver_name"]
        wmi_check_cmd = params["wmi_check_cmd"]
        pattern = params["pattern"]
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
        wmi_check_cmd = utils_misc.set_winutils_letter(session, wmi_check_cmd)
        error_context.context("Run wmi check in guest.", logging.info)
        output = session.cmd_output(wmi_check_cmd)
        queue_num = re.findall(pattern, output, re.M)
        try:
            if not queue_num or queue_num[0] != num_queues:
                test.fail(
                    "The queue_num from guest is not match with expected.\n"
                    "queue_num from guest is %s, expected is %s" %
                    (queue_num, num_queues))
        finally:
            session.close()

    cpu_irq_map = {}
    timeout = float(params.get("login_timeout", 240))
    num_queues = str(utils_cpu.online_cpus_count())
    params['smp'] = num_queues
    params['num_queues'] = num_queues
    images_num = int(num_queues)
    extra_image_size = params.get("image_size_extra_images", "512M")
    system_image = params.get("images")
    system_image_drive_format = params.get("system_image_drive_format",
                                           "virtio")
    params["drive_format_%s" % system_image] = system_image_drive_format
    irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts")
    irq_name = params.get("irq_regex")
    status_cmd = "systemctl status irqbalance"

    error_context.context(
        "Boot up guest with block devcie with num_queues"
        " is %s and smp is %s" % (num_queues, params['smp']), logging.info)
    for vm in env.get_all_vms():
        if vm.is_alive():
            vm.destroy()
    create_data_images()
    params["start_vm"] = "yes"
    vm = env.get_vm(params["main_vm"])
    env_process.preprocess_vm(test, params, env, vm.name)
    session = vm.wait_for_login(timeout=timeout)
    if params["os_type"] == "windows":
        wmi_facility_test(session)
        return
    if not check_irqbalance_status():
        start_irqbalance_service()
    pin_vcpus2host_cpus()
    verify_num_queues()
    prev_irqs = check_interrupts()
    prev_mapping = get_mapping_interrupts2vcpus(prev_irqs, irq_name)
    pin_interrupts2vcpus(*check_interrupts2vcpus(prev_mapping))
    load_io_data_disks()
    cur_irqs = check_interrupts()
    cur_mapping = get_mapping_interrupts2vcpus(cur_irqs, irq_name)
    compare_interrupts(prev_mapping, cur_mapping)
Exemple #51
0
def run(test, params, env):
    """
    Check guest offset in non-event way.

    1) sync host system time with ntp server
    2) boot guest with '-rtc base=utc,clock=host,driftfix=slew'
    3) get output of "qom-get" command
    4) read RTC time inside guest
    5) adjust RTC time forward 1 hour in guest
    6) verify output of "qom-get"

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def get_hwtime(session):
        """
        Get guest's hardware clock in epoch.

        :param session: VM session.
        """
        hwclock_time_command = params.get("hwclock_time_command", "hwclock -u")
        hwclock_time_filter_re = params.get("hwclock_time_filter_re",
                                            r"(\d+-\d+-\d+ \d+:\d+:\d+).*")
        hwclock_time_format = params.get("hwclock_time_format",
                                         "%Y-%m-%d %H:%M:%S")
        output = session.cmd_output_safe(hwclock_time_command)
        try:
            str_time = re.findall(hwclock_time_filter_re, output)[0]
            guest_time = time.mktime(
                time.strptime(str_time, hwclock_time_format))
        except Exception as err:
            logging.debug("(time_format, output): (%s, %s)",
                          hwclock_time_format, output)
            raise err
        return guest_time

    ntp_cmd = params["ntp_cmd"]

    error_context.context("Sync host system time with ntpserver", logging.info)
    process.system(ntp_cmd, shell=True)

    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()

    hwclock_forward_cmd = params["hwclock_forward_cmd"]
    time_forward = params["time_forward"]
    drift_threshold = params["drift_threshold"]

    error_context.context("Get output of qom_get", logging.info)
    qom_st1 = vm.monitor.qom_get("/machine", "rtc-time")

    error_context.context("Get hardware time of guest", logging.info)
    hwclock_st1 = get_hwtime(session)
    logging.debug("hwclock: guest time=%ss", hwclock_st1)

    error_context.context("Adjust guest hardware time forward 1 hour",
                          logging.info)
    session.cmd(hwclock_forward_cmd, timeout=120)

    error_context.context("Verify output of qom-get", logging.info)
    qom_st2 = vm.monitor.qom_get("/machine", "rtc-time")

    qom_gap = int(qom_st2["tm_hour"]) - int(qom_st1["tm_hour"])
    if (qom_gap < 1) or (qom_gap > 2):
        test.fail("Unexpected offset in qom-get, "
                  "qom-get result before change guest's RTC time: %s, "
                  "qom-get result after change guest's RTC time: %s" %
                  (qom_st1, qom_st2))

    error_context.context("Verify guest hardware time", logging.info)
    hwclock_st2 = get_hwtime(session)
    logging.debug("hwclock: guest time=%ss", hwclock_st2)
    session.close()
    if (hwclock_st2 - hwclock_st1 -
            float(time_forward)) > float(drift_threshold):
        test.fail("Unexpected hwclock drift, "
                  "hwclock: current guest time=%ss" % hwclock_st2)
Exemple #52
0
def run(test, params, env):
    """
    Run yonit bitmap benchmark in Windows guests, especially win7 32bit,
    for regression test of BZ #556455.

    Run the benchmark (infinite) loop background using
    run_guest_test_background, and detect the existence of the process
    in guest.

      1. If the process exits before test timeout, that means the benchmark
      exits unexpectedly, and BSOD may have happened, which can be verified
      from the screenshot saved by virt-test.
      2. If only the timeout happens, this test passes, i.e. the guest stays
      good while running the benchmark in the given time.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    sec_per_day = 86400  # seconds per day
    test_timeout = int(params.get("test_timeout", sec_per_day))
    login_timeout = int(params.get("login_timeout", 360))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=login_timeout)

    # Since the benchmark runs into an infinite loop, the background process
    # will never return, unless we get a BSOD.
    #
    # We set the test_timeout of the background guest_test much bigger than
    # that of this test to make sure that the background benchmark is still
    # running while the foreground detection is ongoing.
    error_context.context("run benchmark test in background", logging.info)
    params["test_timeout"] = test_timeout * 2 + sec_per_day
    logging.info("set Yonit bitmap test timeout to"
                 " %ss", params["test_timeout"])
    pid = guest_test.run_guest_test_background(test, params, env)
    if pid < 0:
        session.close()
        test.error("Could not create child process to execute "
                   "guest_test background")

    def is_yonit_benchmark_launched():
        if session.cmd_status(
                'tasklist | find /I "compress_benchmark_loop"') != 0:
            logging.debug("yonit bitmap benchmark was not found")
            return False
        return True

    error_context.context(
        "Watching Yonit bitmap benchmark is"
        " running until timeout", logging.info)
    try:
        # Start detecting whether the benchmark is started a few mins
        # after the background test launched, as the downloading
        # will take some time.
        launch_timeout = login_timeout
        if utils_misc.wait_for(is_yonit_benchmark_launched, launch_timeout,
                               180, 5):
            logging.debug("Yonit bitmap benchmark was launched successfully")
        else:
            test.error("Failed to launch yonit bitmap benchmark")

        # If the benchmark exits before timeout, errors happened.
        if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(),
                               test_timeout, 60, 10):
            test.error("Yonit bitmap benchmark exits unexpectly")
        else:
            if session.is_responsive():
                logging.info("Guest stays good until test timeout")
            else:
                test.fail("Guest is dead")
    finally:
        logging.info("Kill the background benchmark tracking process")
        utils_misc.safe_kill(pid, signal.SIGKILL)
        guest_test.wait_guest_test_background(pid)
        session.close()
def run(test, params, env):
    """
    Boot guest with different vectors, then do netperf testing.

    1) Boot up VM with vectors.
    2) Enable multi queues in guest.
    3) Check guest pci msi support.
    4) Check the cpu interrupt of virtio driver.
    5) Run netperf test in guest.
    6) Repeat step 1 ~ step 5 with different vectors.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def boot_guest_with_vectors(vectors):
        error_context.context("Boot guest with vectors = %s" % vectors,
                              logging.info)
        params["vectors"] = vectors
        params["start_vm"] = "yes"
        try:
            env_process.preprocess(test, params, env)
        except virt_vm.VMError as err:
            if int(vectors) < 0:
                txt = "Parameter 'vectors' expects uint32_t"
                if re.findall(txt, str(err)):
                    return
        if int(vectors) < 0:
            msg = "Qemu did not raise correct error"
            msg += " when vectors = %s" % vectors
            test.fail(msg)

        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        return vm

    def check_msi_support(session):
        vectors = int(params["vectors"])
        if params["os_type"] == "linux":
            devices = session.cmd_output("lspci | grep Eth").strip()
            error_context.context("Check if vnic inside guest support msi.",
                                  logging.info)
            for device in devices.split("\n"):
                if not device:
                    continue
                d_id = device.split()[0]
                msi_check_cmd = params["msi_check_cmd"] % d_id
                status, output = session.cmd_status_output(msi_check_cmd)
                if vectors == 0 and output:
                    if re.findall(r"MSI-X: Enable\+", output):
                        test.fail("MSI-X should not be enabled in guest "
                                  "when vectors=0")
                    logging.info("Guest works well when vectors=0")
                elif vectors != 0 and status:
                    msg = "Could not get ouptut,"
                    msg += " when vectors = %d" % vectors
                    test.fail("msg")
                elif vectors == 1 and output:
                    if not (re.findall("MSI-X: Enable-", output)):
                        msg = "Command %s get wrong output." % msi_check_cmd
                        msg += " when vectors = 1"
                        test.fail(msg)
                    logging.info("MSI-X is disabled")
                elif 2 <= vectors and output:
                    if not (re.findall("MSI-X: Enable+", output)):
                        msg = "Command %s get wrong output." % msi_check_cmd
                        msg += " when vectors = %d" % vectors
                        test.fail(msg)
                    logging.info("MSI-X is enabled")
        else:
            error_context.context(
                "Check if the driver is installed and "
                "verified", logging.info)
            driver_name = params.get("driver_name", "netkvm")
            utils_test.qemu.windrv_check_running_verifier(
                session, vm, test, driver_name, cmd_timeout)
            msis, queues = utils_net.get_msis_and_queues_windows(params, vm)
            if msis == 0 and vectors == 0:
                logging.info("Guest works well when vectors=0")
            elif vectors == 0 and msis != 0:
                test.fail("Can't get msi status when vectors=0")
            if 1 <= vectors and vectors != msis:
                test.fail("Msis should equal to vectors(%s), "
                          "but guest is %s" % (vectors, msis))

    def check_interrupt(session, vectors):
        error_context.context("Check the cpu interrupt of virito",
                              logging.info)
        vectors = int(vectors)
        irq_check_cmd = params["irq_check_cmd"]
        output = session.cmd_output(irq_check_cmd).strip()
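        # The checks below look for entries such as (hypothetical examples)
        # "IO-APIC ... fasteoi" lines when MSI-X is off, or
        # "virtio0-virtqueues" / "virtio0-input" style entries when MSI-X
        # vectors are in use.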
        if vectors == 0 or vectors == 1:
            if not (re.findall("IO-APIC.*fasteoi|XICS.*Level|XIVE.*Level",
                               output)):
                msg = "Could not find interrupt controller for virito device"
                msg += " when vectors = %d" % vectors
                test.fail(msg)
        elif 2 <= vectors <= 8:
            if not re.findall("virtio[0-9]-virtqueues", output):
                msg = "Could not find the virtio device for MSI-X interrupt"
                msg += " when vectors = %d " % vectors
                msg += "Command %s got output %s" % (irq_check_cmd, output)
                test.fail(msg)
        elif vectors == 9 or vectors == 10:
            if not (re.findall("virtio[0-9]-input", output)
                    and re.findall("virtio[0-9]-output", output)):
                msg = "Could not find the virtio device for MSI-X interrupt"
                msg += " when vectors = %d " % vectors
                msg += "Command %s got output %s" % (irq_check_cmd, output)
                test.fail(msg)

    def netperf_test():
        """
        Netperf stress test for nic option.
        """
        n_client = utils_netperf.NetperfClient(
            vm.get_address(),
            params.get("client_path"),
            netperf_source=os.path.join(data_dir.get_deps_dir("netperf"),
                                        params.get("netperf_client_link")),
            client=params.get("shell_client"),
            port=params.get("shell_port"),
            username=params.get("username"),
            password=params.get("password"),
            prompt=params.get("shell_prompt"),
            linesep=params.get("shell_linesep",
                               "\n").encode().decode('unicode_escape'),
            status_test_command=params.get("status_test_command", ""),
            compile_option=params.get("compile_option", ""))
        n_server = utils_netperf.NetperfServer(
            utils_net.get_host_ip_address(params),
            params.get("server_path", "/var/tmp"),
            netperf_source=os.path.join(data_dir.get_deps_dir("netperf"),
                                        params.get("netperf_server_link")),
            password=params.get("hostpassword"),
            compile_option=params.get("compile_option", ""))

        try:
            n_server.start()
            # Run netperf with message size defined in range.
            netperf_test_duration = params.get_numeric("netperf_test_duration")
            test_protocols = params.get("test_protocols", "TCP_STREAM")
            netperf_output_unit = params.get("netperf_output_unit")
            test_option = params.get("test_option", "")
            test_option += " -l %s" % netperf_test_duration
            if netperf_output_unit in "GMKgmk":
                test_option += " -f %s" % netperf_output_unit
            t_option = "%s -t %s" % (test_option, test_protocols)
            n_client.bg_start(utils_net.get_host_ip_address(params),
                              t_option,
                              params.get_numeric("netperf_para_sessions"),
                              params.get("netperf_cmd_prefix", ""),
                              package_sizes=params.get("netperf_sizes"))
            if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1,
                                   "Wait netperf test start"):
                logging.info("Netperf test start successfully.")
            else:
                test.error("Can not start netperf client.")
            utils_misc.wait_for(
                lambda: not n_client.is_netperf_running(),
                netperf_test_duration, 0, 5,
                "Wait netperf test finish %ss" % netperf_test_duration)
        finally:
            n_server.stop()
            n_server.cleanup(True)
            n_client.cleanup(True)

    vectors_list = params["vectors_list"]
    login_timeout = int(params.get("login_timeout", 360))
    cmd_timeout = int(params.get("cmd_timeout", 240))
    for vectors in vectors_list.split():
        vm = boot_guest_with_vectors(vectors)
        if int(vectors) < 0:
            continue
        session = vm.wait_for_login(timeout=login_timeout)
        check_msi_support(session)
        if params["os_type"] == "linux":
            check_interrupt(session, vectors)
        error_context.context("Run netperf test in guest.", logging.info)
        netperf_test()
        vm.destroy(gracefully=True)
Exemple #54
0
def run(test, params, env):
    """
    Balloon guest memory when guest started in paused status,
    memory sizes are compared in MB in this script:
    1) Boot a guest with balloon enabled and in paused status,
    i.e. '-S' used but not cont
    2) Evict guest memory in paused status, cont the guest;
    check memory in monitor
    3) To check if the guest memory balloon working well after above test,
    continue to do:
    3.1) Enlarge guest memory in running status;
    check memory both in guest and monitor
    3.2) Evict guest memory in running status;
    check memory both in guest and monitor
    4) Run subtest if necessary
    5) Reset memory back to the original value

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _memory_check_after_sub_test():
        """
        Check memory status after subtest, the changed_mem is 0
        """
        try:
            return balloon_test.memory_check("after subtest", 0)
        except exceptions.TestFail:
            return None

    if params['os_type'] == 'windows':
        balloon_test = BallooningTestPauseWin(test, params, env)
    else:
        balloon_test = BallooningTestPauseLinux(test, params, env)

    min_sz, max_sz = balloon_test.get_memory_boundary()

    for tag in params.objects('test_tags'):
        vm = env.get_vm(params["main_vm"])
        if vm.monitor.verify_status('paused'):
            error_context.context(
                "Running balloon %s test when"
                " the guest in paused status" % tag, logging.info)
        else:
            error_context.context(
                "Running balloon %s test after"
                " the guest turned to running status" % tag, logging.info)
        params_tag = params.object_params(tag)
        balloon_type = params_tag['balloon_type']
        if balloon_type == 'evict':
            expect_mem = int(random.uniform(min_sz, balloon_test.old_mmem))
        else:
            expect_mem = int(random.uniform(balloon_test.old_mmem, max_sz))

        balloon_test.balloon_memory(expect_mem)
        changed_memory = abs(balloon_test.old_mmem - expect_mem)
        mmem, gmem = balloon_test.memory_check("after %s memory" % tag,
                                               changed_memory)
        balloon_test.old_mmem = mmem
        balloon_test.old_gmem = gmem

    subtest = params.get("sub_test_after_balloon")
    if subtest:
        error_context.context("Running subtest after guest balloon test",
                              logging.info)
        qemu_should_quit = balloon_test.run_balloon_sub_test(
            test, params, env, subtest)
        if qemu_should_quit == 1:
            return

        sleep_before_check = int(params.get("sleep_before_check", 0))
        timeout = int(params.get("balloon_timeout", 100)) + sleep_before_check
        msg = "Wait memory balloon back after %s " % subtest
        output = utils_misc.wait_for(_memory_check_after_sub_test, timeout,
                                     sleep_before_check, 5, msg)
        if output is None:
            raise exceptions.TestFail("Check memory status failed after "
                                      "subtest after %s seconds" % timeout)

    error_context.context(
        "Reset guest memory to original one after all the "
        "test", logging.info)
    balloon_test.reset_memory()
Exemple #55
0
    def add_device(pci_num):
        global iface_scripts
        reference_cmd = params["reference_cmd"]
        info_pci_ref = vm.monitor.info("pci")
        session = vm.wait_for_serial_login(timeout=timeout)
        reference = session.cmd_output(reference_cmd)
        active_nics = get_active_network_device(session, nic_filter)
        logging.debug("Active nics before hotplug - %s", active_nics)

        # Stop the VM monitor and try hot adding SRIOV dev
        if params.get("vm_stop", "no") == "yes":
            logging.debug("stop the monitor of the VM before hotplug")
            vm.pause()
        try:
            # get function for adding device.
            add_function = local_functions["%s_iov" % cmd_type]
        except Exception:
            test.error("No function for adding sr-iov dev with '%s'" %
                       cmd_type)
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                output = session.cmd_output(reference_cmd)
                return output != reference

            # Define a helper function to make sure new nic could get ip.
            def _check_ip():
                post_nics = get_active_network_device(session, nic_filter)
                logging.debug("Active nics after hotplug - %s", post_nics)
                return (len(active_nics) <= len(post_nics)
                        and active_nics != post_nics)

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output("lspci -nn")
                if re.search(vf_filter, output, re.IGNORECASE):
                    return True
                else:
                    return False

            # Resume the VM
            if params.get("vm_resume", "no") == "yes":
                logging.debug("resuming the VM after hotplug")
                vm.resume()

            # Reboot the VM
            if params.get("vm_reboot", "no") == "yes":
                logging.debug("Rebooting the VM after hotplug")
                vm.reboot()
            session = vm.wait_for_serial_login(timeout=timeout)

            error_context.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                test.fail("No new PCI device shown after executing "
                          "monitor command: 'info pci'")

            secs = int(params["wait_secs_for_hook_up"])
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                test.fail("No new device shown in output of command "
                          "executed inside the guest: %s" % reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                test.fail("New add device not found in guest. "
                          "Command was: lspci -nn")

            # Assign static IP to the hotplugged interface
            if params.get("assign_static_ip", "no") == "yes":
                cmd = "service networking restart"
                static_ip = next(ip_gen)
                net_mask = params.get("static_net_mask", "255.255.255.0")
                broadcast = params.get("static_broadcast", "10.10.10.255")
                pci_id = utils_misc.get_pci_id_using_filter(vf_filter, session)
                logging.debug("PCIs associated with %s - %s", vf_filter,
                              ', '.join(map(str, pci_id)))
                for each_pci in pci_id:
                    iface_name = utils_misc.get_interface_from_pci_id(
                        each_pci, session)
                    logging.debug("Interface associated with PCI %s - %s",
                                  each_pci, iface_name)
                    mac = session.cmd_output("ethtool -P %s" % iface_name)
                    mac = mac.split("Permanent address:")[-1].strip()
                    logging.debug("mac address of %s: %s", iface_name, mac)
                    # backup the network script for other distros
                    if "ubuntu" not in vm.get_distro().lower():
                        cmd = "service network restart"
                        iface_scripts.append(
                            utils_net.get_network_cfg_file(iface_name))
                    if not check_interface(str(iface_name), nic_filter):
                        utils_net.create_network_script(iface_name,
                                                        mac,
                                                        boot_proto="static",
                                                        net_mask=net_mask,
                                                        vm=vm,
                                                        ip_addr=static_ip)
                        status, output = session.cmd_status_output(cmd)
                        if status:
                            test.error("Failed to set static ip in guest: "
                                       "%s" % output)
            # Test the newly added device
            if not utils_misc.wait_for(_check_ip, 120, 3, 3):
                ifconfig = session.cmd_output("ifconfig -a")
                test.fail("New hotpluged device could not get ip "
                          "after 120s in guest. guest ifconfig "
                          "output: \n%s" % ifconfig)
            try:
                session.cmd(params["pci_test_cmd"] % (pci_num + 1))
            except aexpect.ShellError as e:
                test.fail("Check device failed after PCI "
                          "hotplug. Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise
Exemple #56
0
def run(test, params, env):
    """
    Verify SLOF info with hugepage.

    Step:
     1. Assign definite size hugepage and mount it in host.
     2. Boot a guest by following ways:
         a. hugepage as backing file
         b. hugepage not as backing file
        then Check if any error info in output of SLOF.
     3. Get the size of memory inside guest.
     4. Hot plug pc-dimm by QMP.
     5. Get the size of memory after hot plug pc-dimm inside guest,
        then check the different value of memory.
     6. Reboot guest.
     7. Guest could login successfully.
     8. Guest could ping external host ip.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _wait_for_login(cur_pos=0):
        """Wait for login guest."""
        content, next_pos = slof.wait_for_loaded(vm, test, cur_pos)
        error_context.context("Check the output of SLOF.", logging.info)
        slof.check_error(test, content)

        error_context.context("Try to log into guest '%s'." % vm.name,
                              logging.info)
        timeout = float(params.get("login_timeout", 240))
        session = vm.wait_for_login(timeout=timeout)
        logging.info("log into guest '%s' successfully." % vm.name)
        return session, next_pos

    _setup_hugepage(params)

    params['start_vm'] = 'yes'
    env_process.preprocess_vm(test, params, env, params["main_vm"])

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session, next_pos = _wait_for_login()

    error_context.context('Get guest free memory size before hotplug pc-dimm.',
                          logging.info)
    orig_mem = int(session.cmd_output(cmd=params['free_mem_cmd']))
    logging.debug('Guest free memory size is %d bytes' % orig_mem)

    error_context.context('Hotplug pc-dimm for guest.', logging.info)
    htp_mem = MemoryHotplugTest(test, params, env)
    htp_mem.hotplug_memory(vm, params['plug_mem_name'])

    plug_timeout = float(params.get('plug_timeout', 5))
    if not utils_misc.wait_for(
            lambda: _check_mem_increase(session, params, orig_mem),
            plug_timeout):
        test.fail("Guest memory size is not increased %s in %s sec." %
                  (params['size_plug'], params.get('plug_timeout', 5)))

    error_context.context('Reboot guest', logging.info)
    session.close()
    vm.reboot()

    session, _ = _wait_for_login(next_pos)
    error_context.context("Try to ping external host.", logging.info)
    extra_host_ip = utils_net.get_host_ip_address(params)
    session.cmd('ping %s -c 5' % extra_host_ip)
    logging.info("Ping host(%s) successfully." % extra_host_ip)

    session.close()
    vm.destroy(gracefully=True)
Exemple #57
0
def run(test, params, env):
    """
    Special hardware test case.
    FC host: ibm-x3650m4-05.lab.eng.pek2.redhat.com
    Disk serial name: scsi-360050763008084e6e0000000000001a4
    # multipath -ll
    mpathb (360050763008084e6e0000000000001a8) dm-4 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 2:0:1:0 sde 8:64 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 2:0:0:0 sdd 8:48 active ready running
    mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 1:0:1:0 sdc 8:32 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 1:0:0:0 sdb 8:16 active ready running
    Customer Bug ID: 1741937  1673546

    Test if VM paused/resume when fc storage offline/online.

    1) pass-through /dev/mapper/mpatha
    2) dd test on pass-through disk
    3) Disconnect the storage during dd test
    4) Check if VM status is 'paused'
    5) Connect the storage, Wait until the storage is accessible again
    6) resume the vm
    7) Check if VM status is 'running'
    8) dd test completed successfully

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        :param vm: VM object.
        :param status: String with desired status.
        :return: True if VM status matches our desired status.
        :return: False if VM status does not match our desired status.
        """
        try:
            vm.verify_status(status)
        except (virt_vm.VMStatusError, qemu_monitor.MonitorLockError):
            return False
        else:
            return True

    def get_multipath_disks(mpath_name="mpatha"):
        """
        Get all disks of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a list of disks on success; otherwise an error is raised
        """
        disks = []
        disk_str = []
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        disk_str.append("active ready running")
        disk_str.append("active faulty offline")
        disk_str.append("failed faulty offline")
        for line in outputs.splitlines():
            if disk_str[0] in line or disk_str[1] in line or disk_str[
                    2] in line:
                disks.append(line.split()[-5])
        if not disks:
            test.fail("Failed to get disks by 'multipath -ll'")
        else:
            return disks

    def get_multipath_disks_status(mpath_name="mpatha"):
        """
        Get status of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a list of path statuses on success; otherwise an error is raised
        """
        disks = get_multipath_disks(mpath_name)
        disks_status = []
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            for i in range(len(disks)):
                if disks[i] in line:
                    disks_status.append(line.strip().split()[-1])
                    break
        if not disks_status or len(disks_status) != len(disks):
            test.fail("Failed to get disks status by 'multipath -ll'")
        else:
            return disks_status

    def compare_multipath_status(status, mpath_name="mpatha"):
        """
        Compare status whether equal to the given status.
        This function just focus on all paths are running or all are offline.

        :param status: the state of disks.
        :param mpath_name: multi-path name.
        :return: True if all paths are in the given status, otherwise False
        """
        status_list = get_multipath_disks_status(mpath_name)
        if len(set(status_list)) == 1 and status_list[0] == status:
            return True
        else:
            return False

    def set_disk_status_to_online_offline(disk, status):
        """
        set disk state to online/offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disk: disk name.
        :param status: the state of disk.
        :return: None
        """
        error_context.context("Set disk '%s' to status '%s'." % (disk, status),
                              logging.info)
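        # This effectively runs e.g. "echo offline > /sys/block/sdb/device/state"
        # (device name illustrative) to force the given path offline/online.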
        process.run("echo %s >  /sys/block/%s/device/state" % (status, disk),
                    shell=True)

    def set_multipath_disks_status(disks, status):
        """
        set multiple paths to same status. all disks online or offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disks: disk list.
        :param status: the state of disk. online/offline
        :return: None
        """
        for disk in disks:
            set_disk_status_to_online_offline(disk, status)
        wait.wait_for(lambda: compare_multipath_status(status),
                      first=wait_time,
                      step=1.5,
                      timeout=60)

    def run_backgroud_process(session, bg_cmd):
        """
        run a background process.

        :param session: A shell session object.
        :param bg_cmd: run it in background.
        :return: background thread
        """
        error_context.context("Start a background process: '%s'" % bg_cmd,
                              logging.info)
        args = (bg_cmd, 360)
        bg = utils_test.BackgroundTest(session.cmd, args)
        bg.start()
        if not utils_misc.wait_for(lambda: bg.is_alive(), 60):
            test.fail("Failed to start background process: '%s'" % bg_cmd)
        return bg

    error_context.context("Get FC host name:", logging.info)
    hostname = process.run("hostname", shell=True).stdout.decode().strip()
    if hostname != params["special_host"]:
        test.cancel("The special host is not '%s', cancel the test." %
                    params["special_host"])
    error_context.context("Get FC disk serial name:", logging.info)
    outputs = process.run("multipath -ll",
                          shell=True).stdout.decode().splitlines()
    stg_serial_name = params["stg_serial_name"]
    image_name_stg = params["image_name_stg"]
    mpath_name = image_name_stg.split("/")[-1]
    for output in outputs:
        if stg_serial_name in output and mpath_name in output:
            break
    else:
        test.cancel("The special disk is not '%s', cancel the test." %
                    stg_serial_name)
    wait_time = float(params.get("sub_test_wait_time", 0))
    multi_disks = get_multipath_disks(mpath_name)
    error_context.context(
        "Get all disks for '%s': %s" % (mpath_name, multi_disks), logging.info)
    error_context.context(
        "Verify all paths are running for %s before"
        "start vm." % mpath_name, logging.info)
    if compare_multipath_status("running", mpath_name):
        logging.info("All paths are running for %s." % mpath_name)
    else:
        test.cancel("Not all paths are running for %s, please "
                    "check environment." % mpath_name)
    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception as e:
        test.error("failed to create VM: %s" % six.text_type(e))
    session = vm.wait_for_login(timeout=int(params.get("timeout", 240)))
    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")
        bg_cmd = params["bg_cmd"]
        bg = run_backgroud_process(session, bg_cmd)
        set_multipath_disks_status(multi_disks, "offline")
        error_context.context("Check if VM status is 'paused'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "paused"), 60):
            test.fail("Guest is not paused after all disks offline")
        error_context.context(
            "Re-connect fc storage, wait until the "
            "storage is accessible again", logging.info)
        set_multipath_disks_status(multi_disks, "running")
        error_context.context("Resume the vm.", logging.info)
        vm.resume()
        error_context.context("Check if VM status is 'running'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"), 60):
            test.fail("Guest is not running after all disks online")
        if bg:
            error_context.context("Wait for dd testing completed",
                                  logging.info)
            bg.join()
        error_context.context(
            "Verify Host and guest kernel no error "
            "and call trace", logging.info)
        vm.verify_kernel_crash()
        error_context.context("Verify dmesg no error", logging.info)
        vm.verify_dmesg()
    finally:
        set_multipath_disks_status(multi_disks, "running")
        session.close()
        vm.destroy(gracefully=True)
Exemple #58
0
def run(test, params, env):
    """
    Test hotplug of sr-iov devices.

    (Elements between [] are configurable test parameters)
    1) Set up sr-iov test environment in host.
    2) Start VM.
    3) Disable the primary link(s) of guest.
    4) PCI add one/multiple sr-iov devices with (or without) repeat
    5) Compare output of monitor command 'info pci'.
    6) Compare output of guest command [reference_cmd].
    7) Verify whether pci_model is shown in [pci_find_cmd].
    8) Check whether the newly added PCI device works fine.
    9) Delete the device, verify whether could remove the sr-iov device.
    10) Re-enable the primary link(s) of guest.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    def check_interface(iface, nic_filter):
        cmd = "ifconfig %s" % str(iface)
        session = vm.wait_for_serial_login(timeout=timeout)
        status, output = session.cmd_status_output(cmd)
        if status:
            test.error("Guest command '%s' fail with output: %s." %
                       (cmd, output))
        if re.findall(nic_filter, output, re.MULTILINE | re.DOTALL):
            return True
        return False

    def get_active_network_device(session, nic_filter):
        devnames = []
        cmd = "ifconfig -a"
        nic_reg = r"\w+(?=: flags)|\w+(?=\s*Link)"
        status, output = session.cmd_status_output(cmd)
        if status:
            test.error("Guest command '%s' fail with output: %s." %
                       (cmd, output))
        ifaces = re.findall(nic_reg, output)
        for iface in ifaces:
            if check_interface(str(iface), nic_filter):
                devnames.append(iface)
        return devnames

    def pci_add_iov(pci_num):
        pci_add_cmd = ("pci_add pci_addr=auto host host=%s,if=%s" %
                       (pa_pci_ids[pci_num], pci_model))
        if params.get("hotplug_params"):
            assign_param = params.get("hotplug_params").split()
            for param in assign_param:
                value = params.get(param)
                if value:
                    pci_add_cmd += ",%s=%s" % (param, value)
        return pci_add(pci_add_cmd)

    def pci_add(pci_add_cmd):
        error_context.context("Adding pci device with command 'pci_add'")
        add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        pci_info.append(['', add_output])
        if "OK domain" not in add_output:
            test.fail("Add PCI device failed. Monitor command is: %s, "
                      "Output: %r" % (pci_add_cmd, add_output))
        return vm.monitor.info("pci")

    def check_support_device(dev):
        if vm.monitor.protocol == 'qmp':
            devices_supported = vm.monitor.human_monitor_cmd("%s ?" % cmd_type)
        else:
            devices_supported = vm.monitor.send_args_cmd("%s ?" % cmd_type)
        # Check if the device is support in qemu
        is_support = utils_misc.find_substring(devices_supported, dev)
        if not is_support:
            test.error("%s doesn't support device: %s" % (cmd_type, dev))

    def device_add_iov(pci_num):
        device_id = "%s" % pci_model + "-" + utils_misc.generate_random_id()
        pci_info.append([device_id])
        driver = params.get("device_driver", "pci-assign")
        check_support_device(driver)
        pci_add_cmd = ("device_add id=%s,driver=%s,host=%s" %
                       (pci_info[pci_num][0], driver, pa_pci_ids[pci_num]))
        if params.get("hotplug_params"):
            assign_param = params.get("hotplug_params").split()
            for param in assign_param:
                value = params.get(param)
                if value:
                    pci_add_cmd += ",%s=%s" % (param, value)
        return device_add(pci_num, pci_add_cmd)

    def device_add(pci_num, pci_add_cmd):
        error_context.context("Adding pci device with command 'device_add'")
        if vm.monitor.protocol == 'qmp':
            add_output = vm.monitor.send_args_cmd(pci_add_cmd)
        else:
            add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        pci_info[pci_num].append(add_output)
        after_add = vm.monitor.info("pci")
        if pci_info[pci_num][0] not in str(after_add):
            logging.debug("Print info pci after add the block: %s" % after_add)
            test.fail("Add device failed. Monitor command is: %s"
                      ". Output: %r" % (pci_add_cmd, add_output))
        return after_add

    def clean_network_scripts():
        logging.debug("Clean up network scripts in guest")
        session = vm.wait_for_serial_login(timeout=timeout)
        if "ubuntu" in vm.get_distro().lower():
            iface_script = "/etc/network/interfaces"
            cmd = "cat %s.BACKUP" % iface_script
            if not session.cmd_status(cmd):
                cmd = "mv %s.BACKUP %s" % (iface_script, iface_script)
                status, output = session.cmd_status_output(cmd)
                if status:
                    test.error("Failed to cleanup network script in guest: "
                               "%s" % output)
        else:
            global iface_scripts
            for iface_script in iface_scripts[:]:
                cmd = "rm -f %s" % iface_script
                status, output = session.cmd_status_output(cmd)
                if status:
                    test.error("Failed to delete iface_script")
                iface_scripts.remove(iface_script)

    # Hot add a pci device
    def add_device(pci_num):
        global iface_scripts
        reference_cmd = params["reference_cmd"]
        info_pci_ref = vm.monitor.info("pci")
        session = vm.wait_for_serial_login(timeout=timeout)
        reference = session.cmd_output(reference_cmd)
        active_nics = get_active_network_device(session, nic_filter)
        logging.debug("Active nics before hotplug - %s", active_nics)

        # Stop the VM monitor and try hot adding SRIOV dev
        if params.get("vm_stop", "no") == "yes":
            logging.debug("stop the monitor of the VM before hotplug")
            vm.pause()
        try:
            # get function for adding device.
            add_function = local_functions["%s_iov" % cmd_type]
        except Exception:
            test.error("No function for adding sr-iov dev with '%s'" %
                       cmd_type)
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                output = session.cmd_output(reference_cmd)
                return output != reference

            # Define a helper function to make sure new nic could get ip.
            def _check_ip():
                post_nics = get_active_network_device(session, nic_filter)
                logging.debug("Active nics after hotplug - %s", post_nics)
                return (len(active_nics) <= len(post_nics)
                        and active_nics != post_nics)

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output("lspci -nn")
                return bool(re.search(vf_filter, output, re.IGNORECASE))

            # Resume the VM
            if params.get("vm_resume", "no") == "yes":
                logging.debug("resuming the VM after hotplug")
                vm.resume()

            # Reboot the VM
            if params.get("vm_reboot", "no") == "yes":
                logging.debug("Rebooting the VM after hotplug")
                vm.reboot()
            session = vm.wait_for_serial_login(timeout=timeout)

            error_context.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                test.fail("No new PCI device shown after executing "
                          "monitor command: 'info pci'")

            secs = int(params["wait_secs_for_hook_up"])
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                test.fail("No new device shown in output of command "
                          "executed inside the guest: %s" % reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                test.fail("New add device not found in guest. "
                          "Command was: lspci -nn")

            # Assign static IP to the hotplugged interface
            if params.get("assign_static_ip", "no") == "yes":
                cmd = "service networking restart"
                static_ip = next(ip_gen)
                net_mask = params.get("static_net_mask", "255.255.255.0")
                broadcast = params.get("static_broadcast", "10.10.10.255")
                pci_id = utils_misc.get_pci_id_using_filter(vf_filter, session)
                logging.debug("PCIs associated with %s - %s", vf_filter,
                              ', '.join(map(str, pci_id)))
                for each_pci in pci_id:
                    iface_name = utils_misc.get_interface_from_pci_id(
                        each_pci, session)
                    logging.debug("Interface associated with PCI %s - %s",
                                  each_pci, iface_name)
                    mac = session.cmd_output("ethtool -P %s" % iface_name)
                    mac = mac.split("Permanent address:")[-1].strip()
                    logging.debug("mac address of %s: %s", iface_name, mac)
                    # backup the network script for other distros
                    if "ubuntu" not in vm.get_distro().lower():
                        cmd = "service network restart"
                        iface_scripts.append(
                            utils_net.get_network_cfg_file(iface_name))
                    if not check_interface(str(iface_name), nic_filter):
                        utils_net.create_network_script(iface_name,
                                                        mac,
                                                        boot_proto="static",
                                                        net_mask=net_mask,
                                                        vm=vm,
                                                        ip_addr=static_ip)
                        status, output = session.cmd_status_output(cmd)
                        if status:
                            test.error("Failed to set static ip in guest: "
                                       "%s" % output)
            # Test the newly added device
            if not utils_misc.wait_for(_check_ip, 120, 3, 3):
                ifconfig = session.cmd_output("ifconfig -a")
                test.fail("New hotpluged device could not get ip "
                          "after 120s in guest. guest ifconfig "
                          "output: \n%s" % ifconfig)
            try:
                session.cmd(params["pci_test_cmd"] % (pci_num + 1))
            except aexpect.ShellError as e:
                test.fail("Check device failed after PCI "
                          "hotplug. Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise

    # Hot delete a pci device
    def pci_del(pci_num, ignore_failure=False):
        def _device_removed():
            after_del = vm.monitor.info("pci")
            return after_del != before_del

        before_del = vm.monitor.info("pci")
        if cmd_type == "pci_add":
            slot_id = "0" + pci_info[pci_num][1].split(",")[2].split()[1]
            cmd = "pci_del pci_addr=%s" % slot_id
            vm.monitor.send_args_cmd(cmd, convert=False)
        elif cmd_type == "device_add":
            cmd = "device_del id=%s" % pci_info[pci_num][0]
            vm.monitor.send_args_cmd(cmd)

        if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1)
                and not ignore_failure):
            test.fail("Failed to hot remove PCI device: %s. "
                      "Monitor command: %s" % (pci_model, cmd))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_serial_login(timeout=timeout)

    test_timeout = int(params.get("test_timeout", 360))
    # Test if it is nic or block
    pci_num_range = int(params.get("pci_num", 1))
    rp_times = int(params.get("repeat_times", 1))
    pci_model = params.get("pci_model", "pci-assign")
    vf_filter = params.get("vf_filter_re")
    generate_mac = params.get("generate_mac", "yes")
    nic_filter = params["nic_interface_filter"]
    devices = []
    device_type = params.get("hotplug_device_type", "vf")
    for i in range(pci_num_range):
        device = {}
        device["type"] = device_type
        if generate_mac == "yes":
            device['mac'] = utils_net.generate_mac_address_simple()
        if params.get("device_name"):
            device["name"] = params.get("device_name")
        devices.append(device)
    device_driver = params.get("device_driver", "pci-assign")
    if vm.pci_assignable is None:
        vm.pci_assignable = test_setup.PciAssignable(
            driver=params.get("driver"),
            driver_option=params.get("driver_option"),
            host_set_flag=params.get("host_setup_flag"),
            kvm_params=params.get("kvm_default"),
            vf_filter_re=vf_filter,
            pf_filter_re=params.get("pf_filter_re"),
            device_driver=device_driver,
            pa_type=params.get("pci_assignable"))

    pa_pci_ids = vm.pci_assignable.request_devs(devices)
    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        error_context.context("modprobe the module %s" % module, logging.info)
        session.cmd("modprobe %s" % module)

    # Probe qemu to verify what is the supported syntax for PCI hotplug
    if vm.monitor.protocol == 'qmp':
        cmd_o = vm.monitor.info("commands")
    else:
        cmd_o = vm.monitor.send_args_cmd("help")

    # find_substring() returns the first supported syntax: "device_add" on
    # current qemu versions, "pci_add" on very old ones.
    cmd_type = utils_misc.find_substring(str(cmd_o), "device_add", "pci_add")
    if not cmd_type:
        test.error("Could not find a supported hotplug command "
                   "(device_add/pci_add) in this version of qemu")

    local_functions = locals()
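    # locals() captures the *_iov helpers defined above (e.g. device_add_iov)
    # so add_device() can look up the right one as "<cmd_type>_iov".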

    if params.get("enable_set_link" "yes") == "yes":
        error_context.context("Disable the primary link(s) of guest",
                              logging.info)
        for nic in vm.virtnet:
            vm.set_link(nic.device_id, up=False)

    try:
        for j in range(rp_times):
            # pci_info is a list of lists.
            # Each element 'i' has 2 members:
            # pci_info[i][0] == device id, only used for device_add
            # pci_info[i][1] == output of the device add command
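            # e.g. pci_info == [["pci-assign-xyz123", "<device_add output>"]]
            # (the device id value here is purely illustrative)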
            pci_info = []
            if params.get("assign_static_ip", "no") == "yes":
                ip_gen = utils_net.gen_ipv4_addr(exclude_ips=[])
                # backup the network script file if it is ubuntu
                if "ubuntu" in vm.get_distro().lower():
                    session = vm.wait_for_serial_login(timeout=timeout)
                    iface_script = "/etc/network/interfaces"
                    cmd = "cat %s" % iface_script
                    if not session.cmd_status(cmd):
                        logging.debug("Backup network script in guest - %s",
                                      iface_script)
                        cmd = "cp %s %s.BACKUP" % (iface_script, iface_script)
                        status, output = session.cmd_status_output(cmd)
                        if status:
                            test.error("Failed to backup in guest: %s" %
                                       output)
            for pci_num in range(pci_num_range):
                msg = "Start hot-adding %sth pci device," % (pci_num + 1)
                msg += " repeat %d" % (j + 1)
                error_context.context(msg, logging.info)
                add_device(pci_num)
            sub_type = params.get("sub_type_after_plug")
            if sub_type:
                error_context.context(
                    "Running sub test '%s' after hotplug" % sub_type,
                    logging.info)
                utils_test.run_virt_sub_test(test, params, env, sub_type)
                if "guest_suspend" == sub_type:
                    # Hotplugged devices have been released after guest
                    # suspend, so the unplug step is not needed.
                    break
            for pci_num in range(pci_num_range):
                msg = "start hot-deleting %sth pci device," % (pci_num + 1)
                msg += " repeat %d" % (j + 1)
                error_context.context(msg, logging.info)
                pci_del(-(pci_num + 1))

            # cleanup network script after hot deleting pci device
            clean_network_scripts()
    finally:
        # clean network scripts on error
        clean_network_scripts()
        if params.get("enable_set_link", "yes") == "yes":
            error_context.context("Re-enabling the primary link(s) of guest",
                                  logging.info)
            for nic in vm.virtnet:
                vm.set_link(nic.device_id, up=True)
        if session:
            session.close()
Exemple #59
0
def run(test, params, env):
    """
    Virtio driver test for windows guest.
    1) boot guest with virtio device.
    2) enable and check driver verifier in guest.
    3) uninstall and reinstall driver:
    3.1) uninstall driver.
    3.2) install driver.
    4) or, downgrade and upgrade driver:
    4.1) downgrade virtio driver to specified version.
    4.2) run subtest. (optional)
    4.3) upgrade virtio driver to original version.
    5) clear the driver verifier.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    driver = params["driver_name"]
    timeout = int(params.get("login_timeout", 360))
    sub_test = params.get("sub_test")
    cdrom_virtio_downgrade = params.get("cdrom_virtio_downgrade")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error_context.context("Boot guest with %s device" % driver, logging.info)
    session = vm.wait_for_login(timeout=timeout)
    if params.get("need_enable_verifier", "yes") == "yes":
        error_context.context("Enable %s driver verifier in guest" % driver,
                              logging.info)
        session = utils_test.qemu.setup_win_driver_verifier(
            session, driver, vm, timeout)

    try:
        if params.get("need_uninstall") == "yes":
            error_context.context("Uninstall virtio driver", logging.info)
            single_driver_install.run(test, params, env)
            # Need to install the driver again after uninstallation.
            params["need_uninstall"] = False
            error_context.context("Install virtio driver", logging.info)
        else:
            error_context.context("Downgrade virtio driver", logging.info)
            new_params = params.copy()
            new_params["cdrom_virtio"] = cdrom_virtio_downgrade
            vm.create(params=new_params)
            vm.verify_alive()
            single_driver_install.run(test, new_params, env)
            if sub_test:
                error_context.context("Run sub test %s" % sub_test,
                                      logging.info)
                utils_test.run_virt_sub_test(test, new_params, env, sub_test)
            error_context.context("Upgrade virtio driver to original",
                                  logging.info)
        vm.create(params=params)
        vm.verify_alive()
        single_driver_install.run(test, params, env)
    finally:
        if params.get("need_clear_verifier", "yes") == "yes":
            error_context.context("Clear %s driver verifier in guest" % driver,
                                  logging.info)
            session = utils_test.qemu.clear_win_driver_verifier(
                session, vm, timeout)
        vm.destroy()
Exemple #60
0
def run(test, params, env):
    """
    Test Step:
        1. Boot up two virtual machine
        2. Set openflow rules
        3. Run ping test, nc(tcp, udp) test, check whether openflow rules take
           effect.
    Params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """
    def run_tcpdump_bg(session, addresses, dump_protocol):
        """
        Run tcpdump in background, tcpdump will exit once catch a packet
        match the rules.
        """
        tcpdump_cmd = "killall -9 tcpdump; "
        tcpdump_cmd += "tcpdump -iany -n -v %s and 'src %s and dst %s' -c 1 &"
        session.cmd_output_safe(tcpdump_cmd %
                                (dump_protocol, addresses[0], addresses[1]))
        if not utils_misc.wait_for(lambda: tcpdump_is_alive(session), 30, 0, 1,
                                   "Waiting for tcpdump to start..."):
            test.cancel("Error, cannot run tcpdump")

    def dump_catch_data(session, dump_log, catch_reg):
        """
        Search data from dump_log
        """
        dump_info = session.cmd_output("cat %s" % dump_log)
        if re.findall(catch_reg, dump_info, re.I):
            return True
        return False

    def tcpdump_is_alive(session):
        """
        Check whether tcpdump is alive
        """
        if session.cmd_status("pidof tcpdump"):
            return False
        return True

    def tcpdump_catch_packet_test(session, drop_flow=False):
        """
        Check whether tcpdump catch match rules packets, once catch a packet
        match rules tcpdump will exit.
        when drop_flow is 'True', tcpdump couldn't catch any packets.
        """
        packet_receive = not tcpdump_is_alive(session)
        if packet_receive == drop_flow:
            err_msg = "Error, flow %s" % (drop_flow and "was" or "wasn't")
            err_msg += " dropped, tcpdump "
            err_msg += "%s " % (packet_receive and "can" or "can not")
            err_msg += "receive the packets"
            test.error(err_msg)
        logging.info(
            "Correct, flow %s dropped, tcpdump %s receive the packet" %
            ((drop_flow and "was" or "was not"),
             (packet_receive and "can" or "can not")))

    def arp_entry_clean(entry=None):
        """
        Clean arp catch in guest
        """
        if not entry:
            arp_clean_cmd = "arp -n | awk '/^[1-2]/{print \"arp -d \" $1}'|sh"
        else:
            arp_clean_cmd = "arp -d %s" % entry
        for session in sessions:
            session.cmd_output_safe(arp_clean_cmd)

    def check_arp_info(session, entry, vm, match_mac=None):
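        """
        Check that an arp entry for 'entry' exists in the guest and, when
        match_mac is given, that it resolves to that mac address (otherwise
        the entry is expected to be 'incomplete').
        """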
        arp_info = session.cmd_output("arp -n")
        arp_entries = [_ for _ in arp_info.splitlines() if re.match(entry, _)]

        match_string = match_mac or "incomplete"

        if not arp_entries:
            test.error("Can not find arp entry in %s: %s" %
                       (vm.name, arp_info))

        if not re.findall(match_string, arp_entries[0], re.I):
            test.fail("Can not find the mac address"
                      " %s of %s in arp"
                      " entry %s" % (mac, vm.name, arp_entries[0]))

    def ping_test(session, dst, drop_flow=False):
        """
        Ping test, check icmp
        """
        ping_status, ping_output = utils_test.ping(dest=dst,
                                                   count=10,
                                                   timeout=20,
                                                   session=session)
        # When drop_flow is True, ping should fail (return non-zero);
        # when drop_flow is False, ping should succeed.
        packets_lost = 100
        if ping_status and not drop_flow:
            test.error("Ping should succeed when icmp is not dropped")
        elif not ping_status:
            packets_lost = utils_test.get_loss_ratio(ping_output)
            if drop_flow and packets_lost != 100:
                test.error("Ping should not work when icmp is dropped")
            if not drop_flow and packets_lost == 100:
                test.error("Ping should work when icmp is not dropped")

        info_msg = "Correct, icmp flow %s dropped, ping %s, "
        info_msg += "packet loss rate is: '%s'"
        logging.info(info_msg,
                     drop_flow and "was" or "was not",
                     ping_status and "failed" or "succeeded", packets_lost)

    def run_ping_bg(vm, dst):
        """
        Run ping in background
        """
        ping_cmd = "ping %s" % dst
        session = vm.wait_for_login()
        logging.info("Ping %s in background" % dst)
        session.sendline(ping_cmd)
        return session

    def check_bg_ping(session):
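        """
        Read the output of the background ping session and report whether
        replies are coming back or the destination is unreachable.
        """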
        ping_pattern = r"\d+ bytes from \d+.\d+.\d+.\d+:"
        ping_pattern += r" icmp_seq=\d+ ttl=\d+ time=.*? ms"
        ping_failed_pattern = r"From .*? icmp_seq=\d+ Destination"
        ping_failed_pattern += r" Host Unreachable"
        try:
            out = session.read_until_output_matches(
                [ping_pattern, ping_failed_pattern])
            if re.search(ping_failed_pattern, out[1]):
                return False, out[1]
            else:
                return True, out[1]
        except Exception as msg:
            return False, msg

    def file_transfer(sessions, addresses, timeout):
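        """
        Create a 1G file in the first guest, scp it to the second guest and
        verify that the md5sum stays the same.
        """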
        prepare_cmd = "dd if=/dev/zero of=/tmp/copy_file count=1024 bs=1M"
        md5_cmd = "md5sum /tmp/copy_file"
        port = params.get("shell_port")
        prompt = params.get("shell_prompt")
        username = params.get("username")
        password = params.get("password")
        sessions[0].cmd(prepare_cmd, timeout=timeout)
        ori_md5 = sessions[0].cmd_output(md5_cmd)
        scp_cmd = (r"scp -v -o UserKnownHostsFile=/dev/null "
                   r"-o StrictHostKeyChecking=no "
                   r"-o PreferredAuthentications=password -r "
                   r"-P %s /tmp/copy_file %s@\[%s\]:/tmp/copy_file" %
                   (port, username, addresses[1]))
        sessions[0].sendline(scp_cmd)
        remote.handle_prompts(sessions[0], username, password, prompt, 600)
        new_md5 = sessions[1].cmd_output(md5_cmd)
        for session in sessions:
            session.cmd("rm -f /tmp/copy_file")
        if new_md5 != ori_md5:
            test.fail("Md5 value changed after file transfer, "
                      "original is %s and the new file"
                      " is: %s" % (ori_md5, new_md5))

    def nc_connect_test(sessions,
                        addresses,
                        drop_flow=False,
                        nc_port="8899",
                        udp_model=False):
        """
        Nc connect test, check tcp and udp
        """
        nc_log = "/tmp/nc_log"
        server_cmd = "nc -l %s"
        client_cmd = "echo client | nc %s %s"
        if udp_model:
            server_cmd += " -u -w 3"
            client_cmd += " -u -w 3"
        server_cmd += " > %s &"
        client_cmd += " &"
        try:
            sessions[1].cmd_output_safe(server_cmd % (nc_port, nc_log))
            sessions[0].cmd_output_safe(client_cmd % (addresses[1], nc_port))

            nc_protocol = udp_model and "UDP" or "TCP"
            nc_connect = False
            if utils_misc.wait_for(
                    lambda: dump_catch_data(sessions[1], nc_log, "client"),
                    10,
                    0,
                    2,
                    text="Wait '%s' connect" % nc_protocol):
                nc_connect = True
            if nc_connect == drop_flow:
                err_msg = "Error, '%s' " % nc_protocol
                err_msg += "flow %s " % (drop_flow and "was" or "was not")
                err_msg += "dropped, nc connect should"
                err_msg += " '%s'" % (nc_connect and "failed" or "success")
                test.error(err_msg)

            logging.info("Correct, '%s' flow %s dropped, and nc connect %s" %
                         (nc_protocol, (drop_flow and "was" or "was not"),
                          (nc_connect and "success" or "failed")))
        finally:
            for session in sessions:
                session.cmd_output_safe("killall nc || killall ncat")
                session.cmd("%s %s" % (clean_cmd, nc_log),
                            ignore_all_errors=True)

    def acl_rules_check(acl_rules, flow_options):
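        """
        Check whether every option of 'flow_options' shows up in at least one
        rule of the 'ovs-ofctl dump-flows' output.
        """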
        flow_options = re.sub("action=", "actions=", flow_options)
        if "arp" in flow_options:
            flow_options = re.sub("nw_src=", "arp_spa=", flow_options)
            flow_options = re.sub("nw_dst=", "arp_tpa=", flow_options)
        acl_options = re.split(",", flow_options)
        for line in acl_rules.splitlines():
            rule = [_.lower() for _ in re.split("[ ,]", line) if _]
            item_in_rule = 0

            for acl_item in acl_options:
                if acl_item.lower() in rule:
                    item_in_rule += 1

            if item_in_rule == len(acl_options):
                return True
        return False

    def remove_plus_items(open_flow_rules):
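        """
        Strip the counters that ovs appends to each rule (duration,
        n_packets, ...) so rule sets can be compared before and after
        deletion.
        """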
        plus_items = [
            "duration", "n_packets", "n_bytes", "idle_age", "hard_age"
        ]
        for plus_item in plus_items:
            open_flow_rules = re.sub("%s=.*?," % plus_item, "",
                                     open_flow_rules)
        return open_flow_rules

    timeout = int(params.get("login_timeout", '360'))
    prepare_timeout = int(params.get("prepare_timeout", '360'))
    clean_cmd = params.get("clean_cmd", "rm -f")
    sessions = []
    addresses = []
    vms = []
    bg_ping_session = None

    error_context.context("Init boot the vms")
    for vm_name in params.get("vms", "vm1 vm2").split():
        vms.append(env.get_vm(vm_name))
    for vm in vms:
        vm.verify_alive()
        sessions.append(vm.wait_for_login(timeout=timeout))
        addresses.append(vm.get_address())

    # set openflow rules:
    br_name = params.get("netdst", "ovs0")
    f_protocol = params.get("flow", "arp")
    f_base_options = "%s,nw_src=%s,nw_dst=%s" % (f_protocol, addresses[0],
                                                 addresses[1])
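    # e.g. f_base_options == "arp,nw_src=192.168.122.10,nw_dst=192.168.122.20"
    # (the addresses shown here are only illustrative)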
    for session in sessions:
        session.cmd("service iptables stop; iptables -F",
                    ignore_all_errors=True)

    try:
        for drop_flow in [True, False]:
            if drop_flow:
                f_command = "add-flow"
                f_options = f_base_options + ",action=drop"
                drop_icmp = eval(params.get("drop_icmp", 'True'))
                drop_tcp = eval(params.get("drop_tcp", 'True'))
                drop_udp = eval(params.get("drop_udp", 'True'))
            else:
                f_command = "mod-flows"
                f_options = f_base_options + ",action=normal"
                drop_icmp = False
                drop_tcp = False
                drop_udp = False

            error_context.base_context("Test prepare")
            error_context.context("Do %s %s on %s" %
                                  (f_command, f_options, br_name))
            utils_net.openflow_manager(br_name, f_command, f_options)
            acl_rules = utils_net.openflow_manager(br_name,
                                                   "dump-flows").stdout
            if not acl_rules_check(acl_rules, f_options):
                test.fail("Can not find the rules from"
                          " ovs-ofctl: %s" % acl_rules)

            error_context.context("Run tcpdump in guest %s" % vms[1].name,
                                  logging.info)
            run_tcpdump_bg(sessions[1], addresses, f_protocol)

            if drop_flow or f_protocol != "arp":
                error_context.context("Clean arp cache in both guest",
                                      logging.info)
                arp_entry_clean(addresses[1])

            error_context.base_context(
                "Exec '%s' flow '%s' test" %
                (f_protocol, drop_flow and "drop" or "normal"))
            if drop_flow:
                error_context.context("Ping test form vm1 to vm2",
                                      logging.info)
                ping_test(sessions[0], addresses[1], drop_icmp)
                if params.get("run_file_transfer") == "yes":
                    error_context.context("Transfer file form vm1 to vm2",
                                          logging.info)
                    file_transfer(sessions, addresses, prepare_timeout)
            else:
                error_context.context(
                    "Ping test form vm1 to vm2 in "
                    "background", logging.info)
                bg_ping_session = run_ping_bg(vms[0], addresses[1])

            if f_protocol == 'arp' and drop_flow:
                error_context.context("Check arp inside %s" % vms[0].name,
                                      logging.info)
                check_arp_info(sessions[0], addresses[1], vms[0])
            elif f_protocol == 'arp' or params.get("check_arp") == "yes":
                time.sleep(2)
                error_context.context("Check arp inside guests.", logging.info)
                for index, address in enumerate(addresses):
                    sess_index = (index + 1) % 2
                    mac = vms[index].virtnet.get_mac_address(0)
                    check_arp_info(sessions[sess_index], address, vms[index],
                                   mac)

            error_context.context("Run nc connect test via tcp", logging.info)
            nc_connect_test(sessions, addresses, drop_tcp)

            error_context.context("Run nc connect test via udp", logging.info)
            nc_connect_test(sessions, addresses, drop_udp, udp_model=True)

            error_context.context("Check tcpdump data catch", logging.info)
            tcpdump_catch_packet_test(sessions[1], drop_flow)
    finally:
        openflow_rules_ori = utils_net.openflow_manager(br_name,
                                                        "dump-flows").stdout
        openflow_rules_ori = remove_plus_items(openflow_rules_ori)
        utils_net.openflow_manager(br_name, "del-flows", f_protocol)
        openflow_rules = utils_net.openflow_manager(br_name,
                                                    "dump-flows").stdout
        openflow_rules = remove_plus_items(openflow_rules)
        removed_rule = list(
            set(openflow_rules_ori.splitlines()) -
            set(openflow_rules.splitlines()))

        if f_protocol == "tcp":
            error_context.context("Run nc connect test via tcp", logging.info)
            nc_connect_test(sessions, addresses)
        elif f_protocol == "udp":
            error_context.context("Run nc connect test via udp", logging.info)
            nc_connect_test(sessions, addresses, udp_model=True)

        for session in sessions:
            session.close()
        failed_msg = []
        if (not removed_rule
                or not acl_rules_check(removed_rule[0], f_options)):
            failed_msg.append("Failed to delete %s" % f_options)
        if bg_ping_session:
            bg_ping_ok = check_bg_ping(bg_ping_session)
            bg_ping_session.close()
            if not bg_ping_ok[0]:
                failed_msg.append("There is something wrong happen in "
                                  "background ping: %s" % bg_ping_ok[1])

        if failed_msg:
            test.fail("; ".join(failed_msg))