Code example #1
File: setup.py Project: intra2net/avocado-i2n
    def set_root(cls, params, object=None):
        """
        Set a root state to provide running object.

        All arguments match the base class.

        .. todo:: study better the environment pre/postprocessing details necessary
                  for flawless vm destruction and creation to improve these
        """
        vm_name = params["vms"]
        for image_name in params.objects("images"):
            image_params = params.object_params(image_name)
            image_name = image_params["image_name"]
            if not os.path.isabs(image_name):
                image_name = os.path.join(image_params["images_base_dir"], image_name)
            image_format = image_params.get("image_format")
            image_format = "" if image_format in ["raw", ""] else "." + image_format
            if not os.path.exists(image_name + image_format):
                logging.info("Creating image %s in order to boot %s",
                             image_name + image_format, vm_name)
                os.makedirs(os.path.dirname(image_name), exist_ok=True)
                image_params.update({"create_image": "yes", "force_create_image": "yes"})
                env_process.preprocess_image(None, image_params, image_name)
        if not params.get_boolean("use_env", True):
            return
        logging.info("Booting %s to provide boot state", vm_name)
        vm = object
        if vm is None:
            raise ValueError("Need an environmental object to boot")
            #vm = env.create_vm(params.get('vm_type'), params.get('target'),
            #                   vm_name, params, None)
        if not vm.is_alive():
            vm.create()
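
Note: a recurring detail in the example above is how the on-disk path of each image is derived: a relative image_name is rooted at images_base_dir and a format suffix is appended unless the format is raw or empty. The snippet below is a minimal, self-contained sketch of just that convention; resolve_image_path and the sample dict values are hypothetical illustrations (a plain dict stands in for the real Params object) and are not part of avocado-i2n.

import os

def resolve_image_path(image_params):
    """Hypothetical helper mirroring the path logic of the example above."""
    image_name = image_params["image_name"]
    if not os.path.isabs(image_name):
        # relative image names are rooted at the configured base directory
        image_name = os.path.join(image_params["images_base_dir"], image_name)
    image_format = image_params.get("image_format", "")
    # raw (or unset) images carry no extension; other formats are appended
    suffix = "" if image_format in ["raw", ""] else "." + image_format
    return image_name + suffix

print(resolve_image_path({"image_name": "vm1/image1",
                          "images_base_dir": "/var/lib/avocado/data",
                          "image_format": "qcow2"}))
# -> /var/lib/avocado/data/vm1/image1.qcow2
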
Code example #2
File: virtio_scsi_mq.py Project: liuyd96/tp-qemu
 def create_data_images():
     """ Create date image objects. """
     for extra_image in range(images_num):
         image_tag = "stg%s" % extra_image
         params["images"] += " %s" % image_tag
         params["image_name_%s" % image_tag] = "images/%s" % image_tag
         params["image_size_%s" % image_tag] = extra_image_size
         params["force_create_image_%s" % image_tag] = "yes"
         image_params = params.object_params(image_tag)
         env_process.preprocess_image(test, image_params, image_tag)
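
Note: the example above relies on the per-object parameter convention: suffixed keys such as image_name_stg0 are written into the flat params dictionary, and params.object_params("stg0") later folds them back into plain image_name, image_size, etc. before preprocess_image is called. Below is a toy stand-in for that resolution step, assuming a plain dict instead of the real virttest.utils_params.Params class; object_params_standin is illustrative only and not the library implementation.

def object_params_standin(params, obj_name):
    """Toy stand-in for Params.object_params(): keys ending in _<obj_name>
    override the generic key of the same name (illustration only)."""
    suffix = "_%s" % obj_name
    resolved = dict(params)
    for key, value in params.items():
        if key.endswith(suffix):
            resolved[key[:-len(suffix)]] = value
    return resolved

params = {"image_name": "images/system", "image_size": "20G",
          "image_name_stg0": "images/stg0", "image_size_stg0": "512M"}
stg0 = object_params_standin(params, "stg0")
print(stg0["image_name"], stg0["image_size"])  # images/stg0 512M
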
Code example #3
        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size,
                "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (
                int(params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (self.vm_guest_params.
                                           object_params("image2"))

            env_process.preprocess_image(test,
                                         self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")
Code example #4
 def _configure_images_params():
     for i in range(image_num):
         name = "stg%d" % i
         params['image_name_%s' % name] = "images/%s" % name
         params['image_size_%s' % name] = stg_image_size
         params["images"] = params["images"] + " " + name
         if params["drive_format"] == "scsi-hd":
             params["drive_bus_%s" % name] = 1
             params["blk_extra_params_%s" % name] = "lun=%d" % i
         image_params = params.object_params(name)
         env_process.preprocess_image(test, image_params, name)
Code example #5
        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size, "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (int(
                params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params[
                "image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (
                self.vm_guest_params.object_params("image2"))

            env_process.preprocess_image(test, self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")
Code example #6
 def configure_images_params(params):
     params_matrix = _create_params_matrix()
     _formats = params_matrix.pop('fmt', [params.get('drive_format')])
     formats = _formats[:]
     usb_port_occupied = 0
     usb_max_port = params.get('usb_max_port', 6)
     set_drive_bus = params.get('set_drive_bus', 'yes') == 'yes'
     no_disks = int(params['stg_image_num'])
     i = 0
     while i < no_disks:
         # Set the format
         if len(formats) < 1:
             if i == 0:
                 test.error("Fail to add any disks, probably bad"
                            " configuration.")
             logging.warn(
                 "Can't create desired number '%s' of disk types "
                 "'%s'. Using '%d' no disks.", no_disks, _formats, i)
             break
         name = 'stg%d' % i
         args = {
             'name': name,
             'filename': params_matrix['stg_image_name'] % i
         }
         fmt = random.choice(formats)
         drive_bus = None
         if set_drive_bus and fmt != 'virtio':
             drive_bus = str(i)
         if fmt == 'virtio_scsi':
             args['fmt'] = 'scsi-hd'
             args['scsi_hba'] = 'virtio-scsi-pci'
         elif fmt == 'lsi_scsi':
             args['fmt'] = 'scsi-hd'
             args['scsi_hba'] = 'lsi53c895a'
         elif fmt == 'spapr_vscsi':
             args['fmt'] = 'scsi-hd'
             args['scsi_hba'] = 'spapr-vscsi'
         elif fmt == 'usb2':
             usb_port_occupied += 1
             if usb_port_occupied > int(usb_max_port):
                 continue
             args['fmt'] = fmt
         else:
             args['fmt'] = fmt
         args['drive_bus'] = drive_bus
         # Other params
         for key, value in params_matrix.items():
             args[key] = random.choice(value)
         env_process.preprocess_image(
             test,
             convert_params(params, args).object_params(name), name)
         i += 1
Code example #7
def run(test, params, env):
    """
    Run a gluster test.
    steps:
    1) create a gluster brick if one with a suitable name does not exist
    2) create volume on brick
    3) create VM image on disk with specific format
    4) install vm on VM image
    5) boot VM
    6) start fio test on booted VM

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    error.context("Create gluster fs image")
    gluster_params = params.object_params("gluster")
    image_name = gluster_params.get("image_name")

    env_process.preprocess_image(test, gluster_params, image_name)
Code example #8
def set_vm_for_dump(test, params):
    """
    Update VM memory and image size params so that the dump can be generated
    and analysed.

    :param test: kvm test object
    :param params: Params object
    """
    host_free_mem = utils_misc.get_mem_info(attr='MemFree')
    host_avail_disk = int(process.getoutput(params["get_avail_disk"]))
    sys_image_size = int(
        float(utils_misc.normalize_data_size(params["image_size"], "G")))
    if host_avail_disk < (host_free_mem // 1024**2) * 1.2 + sys_image_size:
        params["mem"] = (host_avail_disk - sys_image_size) * 0.8 // 2.4 * 1024
    image_size_stg = int(
        float(utils_misc.normalize_data_size(params["mem"] + "M", "G")) * 1.4)
    params["image_size_stg"] = str(image_size_stg) + "G"

    params["force_create_image_stg"] = "yes"
    image_params = params.object_params("stg")
    env_process.preprocess_image(test, image_params, "stg")
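
Note: the sizing arithmetic above is easier to follow with concrete numbers. The sketch below replays the formula with made-up values (80 GiB of free disk, 64 GiB of free host memory reported in KiB, a 20 GiB system image); it only illustrates the calculation and is not part of the test.

host_avail_disk = 80                    # GiB free on the target disk (made up)
host_free_mem = 64 * 1024 ** 2          # MemFree in KiB (made up)
sys_image_size = 20                     # system image size in GiB (made up)

if host_avail_disk < (host_free_mem // 1024 ** 2) * 1.2 + sys_image_size:
    # 64 * 1.2 + 20 = 96.8 GiB would be needed but only 80 are available,
    # so guest memory (in MiB) is shrunk to make the dump fit on disk
    mem = (host_avail_disk - sys_image_size) * 0.8 // 2.4 * 1024  # 20480.0 MiB
    image_size_stg = int(mem / 1024 * 1.4)                        # 28 GiB dump disk
    print(mem, image_size_stg)
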
Code example #9
    def set_root(params, object=None):
        """
        Set a root state to provide object existence.

        :param params: configuration parameters
        :type params: {str, str}
        :param object: object whose states are manipulated
        :type object: VM object or None
        """
        vm_name = params["vms"]
        for image in params.objects("images"):
            image_params = params.object_params(image)
            image_name = image_params["image_name"]
            if not os.path.isabs(image_name):
                image_name = os.path.join(image_params["images_base_dir"],
                                          image_name)
            logging.info("Creating image %s for %s", image_name, vm_name)
            image_params.update({
                "create_image": "yes",
                "force_create_image": "yes"
            })
            env_process.preprocess_image(None, image_params, image_name)
Code example #10
File: setup.py Project: intra2net/avocado-i2n
    def set_root(cls, params, object=None):
        """
        Set a root state to provide object existence.

        :param params: configuration parameters
        :type params: {str, str}
        :param object: object whose states are manipulated
        :type object: VM object or None
        """
        vm_name = params["vms"]
        if object is not None and object.is_alive():
            object.destroy(gracefully=params.get_boolean("soft_boot", True))
        image_name = params["image_name"]
        if not os.path.isabs(image_name):
            image_name = os.path.join(params["images_base_dir"], image_name)
        image_format = params.get("image_format")
        image_format = "" if image_format in ["raw", ""] else "." + image_format
        if not os.path.exists(image_name + image_format):
            os.makedirs(os.path.dirname(image_name), exist_ok=True)
            logging.info("Creating image %s for %s", image_name, vm_name)
            params.update({"create_image": "yes", "force_create_image": "yes"})
            env_process.preprocess_image(None, params, image_name)
Code example #11
File: multi_disk.py Project: archerslaw/virt-test
def run_multi_disk(test, params, env):
    """
    Test multi disk support of the guest. This case will:
    1) Create disks image in configuration file.
    2) Start the guest with those disks.
    3) Checks qtree vs. test params.
    4) Format those disks.
    5) Copy file into / out of those disks.
    6) Compare the original file and the copied file using md5 or fc command.
    7) Repeat steps 3-5 if needed.

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def _add_param(name, value):
        """ Converts name+value to stg_params string """
        if value:
            value = re.sub(' ', '\\ ', value)
            return " %s:%s " % (name, value)
        else:
            return ''

    stg_image_num = 0
    stg_params = params.get("stg_params", "")
    # Compatibility
    stg_params += _add_param("image_size", params.get("stg_image_size"))
    stg_params += _add_param("image_format", params.get("stg_image_format"))
    stg_params += _add_param("image_boot", params.get("stg_image_boot", "no"))
    stg_params += _add_param("drive_format", params.get("stg_drive_format"))
    if params.get("stg_assign_index") != "no":
        # Assume 0 and 1 are already occupied (hd0 and cdrom)
        stg_params += _add_param("drive_index", 'range(2,n)')
    param_matrix = {}

    stg_params = stg_params.split(' ')
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                          stg_params.pop(i + 1))
        i += 1

    rerange = []
    has_name = False
    for i in xrange(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        if cmd == "image_name":
            has_name = True
        if _RE_RANGE1.match(parm):
            parm = _range(parm)
            if parm == False:
                raise error.TestError("Incorrect cfg: stg_params %s looks "
                                      "like range(..) but doesn't contain "
                                      "numbers." % cmd)
            param_matrix[cmd] = parm
            if type(parm) is str:
                # When we know the stg_image_num, substitute it.
                rerange.append(cmd)
                continue
        else:
            # ',' separated list of values
            parm = parm.split(',')
            j = 0
            while j < len(parm) - 1:
                if parm[j][-1] == '\\':
                    parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
                j += 1
            param_matrix[cmd] = parm
        stg_image_num = max(stg_image_num, len(parm))

    stg_image_num = int(params.get('stg_image_num', stg_image_num))
    for cmd in rerange:
        param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num)
    # param_table* are for pretty print of param_matrix
    param_table = []
    param_table_header = ['name']
    if not has_name:
        param_table_header.append('image_name')
    for _ in param_matrix:
        param_table_header.append(_)

    stg_image_name = params.get('stg_image_name', '%s')
    for i in xrange(stg_image_num):
        name = "stg%d" % i
        params['images'] += " %s" % name
        param_table.append([])
        param_table[-1].append(name)
        if not has_name:
            params["image_name_%s" % name] = stg_image_name % name
            param_table[-1].append(params.get("image_name_%s" % name))
        for parm in param_matrix.iteritems():
            params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])])
            param_table[-1].append(params.get('%s_%s' % (parm[0], name)))

    if params.get("multi_disk_params_only") == 'yes':
        # Only print the test param_matrix and finish
        logging.info('Newly added disks:\n%s',
                     utils.matrix_to_string(param_table, param_table_header))
        return

    # Always recreate VMs and disks
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        for image_name in vm_params.objects("images"):
            image_params = vm_params.object_params(image_name)
            env_process.preprocess_image(test, image_params, image_name)

    vm = env.get_vm(params["main_vm"])
    vm.create(timeout=max(10, stg_image_num), params=params)
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    images = params.get("images").split()
    n_repeat = int(params.get("n_repeat", "1"))
    image_num = len(images)
    file_system = params.get("file_system").split()
    fs_num = len(file_system)
    cmd_timeout = float(params.get("cmd_timeout", 360))
    re_str = params.get("re_str")
    black_list = params.get("black_list").split()

    error.context("verifying qtree vs. test params")
    err = 0
    qtree = qemu_qtree.QtreeContainer()
    qtree.parse_info_qtree(vm.monitor.info('qtree'))
    disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
    (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info('block'))
    err += tmp1 + tmp2
    err += disks.generate_params()
    err += disks.check_disk_params(params)
    (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(
                                    session.cmd_output('cat /proc/scsi/scsi'))
    err += tmp1 + tmp2

    if err:
        raise error.TestFail("%s errors occurred while verifying qtree vs. "
                             "params" % err)
    if params.get('multi_disk_only_qtree') == 'yes':
        return

    try:
        if params.get("clean_cmd"):
            cmd = params.get("clean_cmd")
            session.cmd_status_output(cmd)
        if params.get("pre_cmd"):
            cmd = params.get("pre_cmd")
            error.context("creating partition on test disk")
            session.cmd(cmd, timeout=cmd_timeout)
        cmd = params.get("list_volume_command")
        output = session.cmd_output(cmd, timeout=cmd_timeout)
        disks = re.findall(re_str, output)
        disks = map(string.strip, disks)
        disks.sort()
        logging.debug("Volume list that meets regular expressions: '%s'", disks)
        if len(disks) < image_num:
            raise error.TestFail("Fail to list all the volumes!")

        if params.get("os_type") == "linux":
            df_output = session.cmd_output("df")
            li = re.findall("^/dev/(.*?)[ \d]", df_output, re.M)
            if li:
                black_list.extend(li)

        exclude_list = [d for d in disks if d in black_list]
        f = lambda d: logging.info("No need to check volume '%s'", d)
        map(f, exclude_list)

        disks = [d for d in disks if d not in exclude_list]

        for i in range(n_repeat):
            logging.info("iterations: %s", (i + 1))
            for disk in disks:
                disk = disk.strip()

                logging.info("Format disk: %s...", disk)
                index = random.randint(0, fs_num - 1)

                # Random select one file system from file_system
                fs = file_system[index].strip()
                cmd = params.get("format_command") % (fs, disk)
                error.context("formatting test disk")
                session.cmd(cmd, timeout=cmd_timeout)
                if params.get("mount_command"):
                    cmd = params.get("mount_command") % (disk, disk, disk)
                    session.cmd(cmd, timeout=cmd_timeout)

            for disk in disks:
                disk = disk.strip()

                logging.info("Performing I/O on disk: %s...", disk)
                cmd_list = params.get("cmd_list").split()
                for cmd_l in cmd_list:
                    if params.get(cmd_l):
                        cmd = params.get(cmd_l) % disk
                        session.cmd(cmd, timeout=cmd_timeout)

                cmd = params.get("compare_command")
                output = session.cmd_output(cmd)
                key_word = params.get("check_result_key_word")
                if key_word and key_word in output:
                    logging.debug("Guest's virtual disk %s works fine", disk)
                elif key_word:
                    raise error.TestFail("Files on guest os root fs and disk "
                                         "differ")
                else:
                    raise error.TestError("Param check_result_key_word was not "
                                          "specified! Please check your config")

            if params.get("umount_command"):
                cmd = params.get("show_mount_cmd")
                output = session.cmd_output(cmd)
                disks = re.findall(re_str, output)
                disks.sort()
                for disk in disks:
                    disk = disk.strip()
                    cmd = params.get("umount_command") % (disk, disk)
                    error.context("unmounting test disk")
                    session.cmd(cmd)
    finally:
        if params.get("post_cmd"):
            cmd = params.get("post_cmd")
            session.cmd(cmd)
        session.close()
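
Note: one easily missed piece of machinery in run_multi_disk above is the escaping done by _add_param: values containing spaces are backslash-escaped so the whole stg_params string can be split on spaces, and the loop right after the split re-joins any token that still ends with the escape character. The following standalone sketch reproduces just that round trip; the parameter values are made up for illustration.

import re

def _add_param(name, value):
    """Escape spaces so the pair survives a later str.split(' '), as above."""
    if value:
        value = re.sub(' ', '\\ ', value)
        return " %s:%s " % (name, value)
    return ''

stg_params = _add_param("image_size", "1 G") + _add_param("image_format", "qcow2")
tokens = stg_params.split(' ')
i = 0
while i < len(tokens) - 1:
    # re-join tokens that still end with the escape backslash
    if tokens[i].strip() and tokens[i][-1] == '\\':
        tokens[i] = '%s %s' % (tokens[i][:-1], tokens.pop(i + 1))
    i += 1
print([t for t in tokens if t.strip()])  # ['image_size:1 G', 'image_format:qcow2']
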
Code example #12
def run(test, params, env):
    """
    Qemu multiqueue test for virtio-scsi controller:

    1) Boot up a guest with a virtio-scsi device which supports multi-queue;
       the vcpu and image counts of the guest should match the multi-queue number
    2) Check the multi queue option from monitor
    3) Check device init status in guest
    4) Load I/O in all targets
    5) Check the interrupt queues in guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def proc_interrupts_results(results):
        results_dict = {}
        cpu_count = 0
        cpu_list = []
        for line in results.splitlines():
            line = line.strip()
            if re.match("CPU0", line):
                cpu_list = re.findall("CPU\d+", line)
                cpu_count = len(cpu_list)
                continue
            if cpu_count > 0:
                irq_key = re.split(":", line)[0]
                results_dict[irq_key] = {}
                content = line[len(irq_key) + 1:].strip()
                if len(re.split("\s+", content)) < cpu_count:
                    continue
                count = 0
                irq_des = ""
                for irq_item in re.split("\s+", content):
                    if count < cpu_count:
                        if count == 0:
                            results_dict[irq_key]["count"] = []
                        results_dict[irq_key]["count"].append(irq_item)
                    else:
                        irq_des += " %s" % irq_item
                    count += 1
                results_dict[irq_key]["irq_des"] = irq_des.strip()
        return results_dict, cpu_list

    timeout = float(params.get("login_timeout", 240))
    host_cpu_num = local_host.LocalHost().get_num_cpu()
    while host_cpu_num:
        num_queues = str(host_cpu_num)
        host_cpu_num &= host_cpu_num - 1
    params['smp'] = num_queues
    params['num_queues'] = num_queues
    images_num = int(num_queues)
    extra_image_size = params.get("image_size_extra_images", "512M")
    system_image = params.get("images")
    system_image_drive_format = params.get("system_image_drive_format", "ide")
    params["drive_format_%s" % system_image] = system_image_drive_format
    dev_type = params.get("dev_type", "i440FX-pcihost")

    error.context("Boot up guest with block devcie with num_queues"
                  " is %s and smp is %s" % (num_queues, params['smp']),
                  logging.info)
    for vm in env.get_all_vms():
        if vm.is_alive():
            vm.destroy()
    for extra_image in range(images_num):
        image_tag = "stg%s" % extra_image
        params["images"] += " %s" % image_tag
        params["image_name_%s" % image_tag] = "images/%s" % image_tag
        params["image_size_%s" % image_tag] = extra_image_size
        params["force_create_image_%s" % image_tag] = "yes"
        image_params = params.object_params(image_tag)
        env_process.preprocess_image(test, image_params, image_tag)

    params["start_vm"] = "yes"
    vm = env.get_vm(params["main_vm"])
    env_process.preprocess_vm(test, params, env, vm.name)
    session = vm.wait_for_login(timeout=timeout)

    error.context("Check irqbalance service status", logging.info)
    output = session.cmd_output("systemctl status irqbalance")
    if not re.findall("Active: active", output):
        session.cmd("systemctl start irqbalance")
        output = session.cmd_output("systemctl status irqbalance")
        output = utils_misc.strip_console_codes(output)
        if not re.findall("Active: active", output):
            raise error.TestNAError("Can not start irqbalance inside guest. "
                                    "Skip this test.")

    error.context("Pin vcpus to host cpus", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vcpu_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vcpu_num >= len(vm.vcpu_threads):
                break
            vcpu_tid = vm.vcpu_threads[vcpu_num]
            logging.debug("pin vcpu thread(%s) to cpu"
                          "(%s)" % (vcpu_tid,
                                    numa_node.pin_cpu(vcpu_tid)))
            vcpu_num += 1

    error.context("Verify num_queues from monitor", logging.info)
    qtree = qemu_qtree.QtreeContainer()
    try:
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
    except AttributeError:
        raise error.TestNAError("Monitor deson't supoort qtree "
                                "skip this test")
    error_msg = "Number of queues mismatch: expect %s"
    error_msg += " report from monitor: %s(%s)"
    scsi_bus_addr = ""
    for qdev in qtree.get_qtree().get_children():
        if qdev.qtree["type"] == dev_type:
            for pci_bus in qdev.get_children():
                for pcic in pci_bus.get_children():
                    if pcic.qtree["class_name"] == "SCSI controller":
                        qtree_queues = pcic.qtree["num_queues"].split("(")[0]
                        if qtree_queues.strip() != num_queues.strip():
                            error_msg = error_msg % (num_queues,
                                                     qtree_queues,
                                                     pcic.qtree["num_queues"])
                            raise error.TestFail(error_msg)
                    if pcic.qtree["class_name"] == "SCSI controller":
                        scsi_bus_addr = pcic.qtree['addr']
                        break

    if not scsi_bus_addr:
        raise error.TestError("Didn't find addr from qtree. Please check "
                              "the log.")
    error.context("Check device init status in guest", logging.info)
    init_check_cmd = params.get("init_check_cmd", "dmesg | grep irq")
    output = session.cmd_output(init_check_cmd)
    irqs_pattern = params.get("irqs_pattern", "%s:\s+irq\s+(\d+)")
    irqs_pattern = irqs_pattern % scsi_bus_addr
    irqs_watch = re.findall(irqs_pattern, output)
    # A virtio device has several interrupt counters: config, control,
    # event and request, and each queue has its own request counter.
    # So the total count for the virtio device should equal the number
    # of queues plus three.
    if len(irqs_watch) != 3 + int(num_queues):
        raise error.TestFail("Failed to check the interrupt ids from dmesg")
    irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts")
    output = session.cmd_output(irq_check_cmd)
    irq_results, _ = proc_interrupts_results(output)
    for irq_watch in irqs_watch:
        if irq_watch not in irq_results:
            raise error.TestFail("Can't find irq %s from procfs" % irq_watch)

    error.context("Load I/O in all targets", logging.info)
    get_dev_cmd = params.get("get_dev_cmd", "ls /dev/[svh]d*")
    output = session.cmd_output(get_dev_cmd)
    system_dev = re.findall("[svh]d(\w+)\d+", output)[0]
    dd_timeout = int(re.findall("\d+", extra_image_size)[0])
    fill_cmd = ""
    count = 0
    for dev in re.split("\s+", output):
        if not dev:
            continue
        if not re.findall("[svh]d%s" % system_dev, dev):
            fill_cmd += " dd of=%s if=/dev/urandom bs=1M " % dev
            fill_cmd += "count=%s &&" % dd_timeout
            count += 1
    if count != images_num:
        raise error.TestError("Disks are not all show up in system. Output "
                              "from the check command: %s" % output)
    fill_cmd = fill_cmd.rstrip("&&")
    session.cmd(fill_cmd, timeout=dd_timeout)

    error.context("Check the interrupt queues in guest", logging.info)
    output = session.cmd_output(irq_check_cmd)
    irq_results, cpu_list = proc_interrupts_results(output)
    irq_bit_map = 0
    for irq_watch in irqs_watch:
        if "request" in irq_results[irq_watch]["irq_des"]:
            for index, count in enumerate(irq_results[irq_watch]["count"]):
                if int(count) > 0:
                    irq_bit_map |= 2 ** index

    cpu_count = 0
    error_msg = ""
    cpu_not_used = []
    for index, cpu in enumerate(cpu_list):
        if 2 ** index & irq_bit_map != 2 ** index:
            cpu_not_used.append(cpu)

    if cpu_not_used:
        logging.debug("Interrupt info from procfs:\n%s" % output)
        error_msg = " ".join(cpu_not_used)
        if len(cpu_not_used) > 1:
            error_msg += " are"
        else:
            error_msg += " is"
        error_msg += " not used during test. Please check debug log for"
        error_msg += " more information."
        raise error.TestFail(error_msg)
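
Note: the num_queues calculation near the top of this test uses a small bit trick: each loop iteration records the current value and then clears the lowest set bit (host_cpu_num &= host_cpu_num - 1), so the loop exits with the host CPU count rounded down to the nearest power of two. A standalone illustration of just that loop:

def round_down_to_power_of_two(n):
    """Same bit trick as the num_queues loop above: keep clearing the lowest
    set bit; the last non-zero value is the highest power of two <= n."""
    last = None
    while n:
        last = n
        n &= n - 1
    return last

print([round_down_to_power_of_two(n) for n in (1, 3, 6, 12, 17)])
# [1, 2, 4, 8, 16]
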
Code example #13
    def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices):
        """
        Inserts no_disks disks into qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, updated params)
        :rtype: tuple(list, Params)
        """
        dev_idx = 0
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        if len(new_devices) == 1:
            strict_mode = None
        else:
            strict_mode = True
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                if i == 0:
                    raise error.TestError("Fail to add any disks, probably bad"
                                          " configuration.")
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            try:
                devs = qdev.images_define_by_variables(**args)
                # parallel test adds devices in mixed order, force bus/addrs
                qdev.insert(devs, strict_mode)
            except utils.DeviceError:
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices[dev_idx].extend(devs)
            dev_idx = (dev_idx + 1) % len(new_devices)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Using disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params
Code example #14
File: nfs_corrupt.py Project: pezhang/tp-qemu
def run(test, params, env):
    """
    Test whether the VM is paused when the NFS share holding its image is shut
    down; the drive option 'werror' should be 'stop' and the drive option
    'cache' should be 'none'.

    1) Setup NFS service on host
    2) Boot up a VM using another disk on NFS server and write the disk by dd
    3) Check if VM status is 'running'
    4) Reject NFS connection on host
    5) Check if VM status is 'paused'
    6) Accept NFS connection on host and continue VM by monitor command
    7) Check if VM status is 'running'

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_nfs_devname(params, session):
        """
        Get the possible device name of the NFS storage device in the guest.

        :param params: Test params dictionary.
        :param session: An SSH session object.
        """
        image1_type = params.object_params("image1").get("drive_format")
        stg_type = params.object_params("stg").get("drive_format")
        cmd = ""
        # Seems we can get correct 'stg' devname even if the 'stg' image
        # has a different type from main image (we call it 'image1' in
        # config file) with these 'if' branches.
        if image1_type == stg_type:
            cmd = "ls /dev/[hsv]d[a-z]"
        elif stg_type == "virtio":
            cmd = "ls /dev/vd[a-z]"
        else:
            cmd = "ls /dev/[sh]d[a-z]"

        cmd += " | tail -n 1"
        return session.cmd_output(cmd).rstrip()

    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        :param vm: VM object.
        :param status: String with desired status.
        :return: True if VM status matches our desired status.
        :return: False if VM status does not match our desired status.
        """
        try:
            vm.verify_status(status)
        except (virt_vm.VMStatusError, qemu_monitor.MonitorLockError):
            return False
        else:
            return True

    error_context.context("Setup NFS Server on local host", logging.info)
    host_ip = utils_net.get_host_ip_address(params)
    try:
        config = NFSCorruptConfig(test, params, host_ip)
        config.setup()
    except NFSCorruptError as e:
        test.error(str(e))
    image_name = os.path.join(config.mnt_dir, "nfs_corrupt")
    params["image_name_stg"] = image_name
    params["force_create_image_stg"] = "yes"
    params["create_image_stg"] = "yes"
    stg_params = params.object_params("stg")

    error_context.context("Boot vm with image on NFS server", logging.info)
    env_process.preprocess_image(test, stg_params, image_name)

    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception:
        config.cleanup()
        test.error("failed to create VM")
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    nfs_devname = get_nfs_devname(params, session)
    # Write disk on NFS server
    error_context.context("Write disk that image on NFS", logging.info)
    write_disk_cmd = "dd if=/dev/zero of=%s oflag=direct" % nfs_devname
    logging.info("dd with command: %s", write_disk_cmd)
    session.sendline(write_disk_cmd)
    try:
        # Read some command output, it will timeout
        session.read_up_to_prompt(timeout=30)
    except Exception:
        pass

    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")

        try:
            error_context.context("Reject NFS connection on host",
                                  logging.info)
            process.system(config.iptables_rule_gen('A'))

            error_context.context("Check if VM status is 'paused'",
                                  logging.info)
            if not utils_misc.wait_for(
                lambda: check_vm_status(vm, "paused"),
                    int(params.get('wait_paused_timeout', 240))):
                test.error("Guest is not paused after stop NFS")
        finally:
            error_context.context("Accept NFS connection on host",
                                  logging.info)
            process.system(config.iptables_rule_gen('D'))

        error_context.context("Ensure nfs is resumed", logging.info)
        nfs_resume_timeout = int(params.get('nfs_resume_timeout', 240))
        if not utils_misc.wait_for(config.is_mounted_dir_acessible,
                                   nfs_resume_timeout):
            test.error("NFS connection does not resume")

        error_context.context("Continue guest", logging.info)
        vm.resume()

        error_context.context("Check if VM status is 'running'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"), 20):
            test.error("Guest does not restore to 'running' status")

    finally:
        session.close()
        vm.destroy(gracefully=True)
        config.cleanup()
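
Note: both NFS examples poll the VM state with utils_misc.wait_for(lambda: check_vm_status(vm, "paused"), timeout) instead of asserting it immediately, since pausing only happens some time after the NFS traffic is rejected. The sketch below is a toy poller showing the shape of that pattern; it is not the real utils_misc.wait_for implementation, just an assumed equivalent for illustration.

import time

def wait_for(condition, timeout, step=1.0):
    """Toy poller: call condition() until it returns a truthy value or the
    timeout expires; return the value, or None on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        time.sleep(step)
    return None

print(wait_for(lambda: True, timeout=5))  # True
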
Code example #15
    def insert_into_qdev(qdev, param_matrix, no_disks, params):
        """
        Inserts no_disks disks into qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, updated params)
        :rtype: tuple(list, Params)
        """
        new_devices = []
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                logging.warn(
                    "Can't create desired number '%s' of disk types "
                    "'%s'. Using '%d' no disks.", no_disks, _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            devs = qdev.images_define_by_variables(**args)
            try:
                for dev in devs:
                    qdev.insert(dev, force=False)
            except qemu_devices.DeviceInsertError:
                # All buses are full, (TODO add bus) or remove this format
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            # TODO: Modify check_disk_params to use vm.devices
            # 1) modify PCI bus to accept full pci addr (02.0, 01.3, ...)
            # 2) add all devices into qemu_devices according to qtree
            # 3) check qtree vs. qemu_devices PCI representation (+children)
            #    (use qtree vs devices, if key and value_qtree == value_devices
            #     match the device and remove it from comparison.
            #     Also use blacklist to remove unnecessary stuff (like
            #     kvmclock, smbus-eeprom, ... from qtree and drive, ... from
            #     devices)
            # => then modify this to use qtree verification
            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices.extend(devs)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Adding disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params
Code example #16
def run_nfs_corrupt(test, params, env):
    """
    Test whether the VM is paused when the NFS share holding its image is shut
    down; the drive option 'werror' should be 'stop' and the drive option
    'cache' should be 'none'.

    1) Setup NFS service on host
    2) Boot up a VM using another disk on NFS server and write the disk by dd
    3) Check if VM status is 'running'
    4) Reject NFS connection on host
    5) Check if VM status is 'paused'
    6) Accept NFS connection on host and continue VM by monitor command
    7) Check if VM status is 'running'

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_nfs_devname(params, session):
        """
        Get the possible device name of the NFS storage device in the guest.

        :param params: Test params dictionary.
        :param session: An SSH session object.
        """
        image1_type = params.object_params("image1").get("drive_format")
        stg_type = params.object_params("stg").get("drive_format")
        cmd = ""
        # Seems we can get correct 'stg' devname even if the 'stg' image
        # has a different type from main image (we call it 'image1' in
        # config file) with these 'if' branches.
        if image1_type == stg_type:
            cmd = "ls /dev/[hsv]d[a-z]"
        elif stg_type == "virtio":
            cmd = "ls /dev/vd[a-z]"
        else:
            cmd = "ls /dev/[sh]d[a-z]"

        cmd += " | tail -n 1"
        return session.cmd_output(cmd)

    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        :param vm: VM object.
        :param status: String with desired status.
        :return: True if VM status matches our desired status.
        :return: False if VM status does not match our desired status.
        """
        try:
            vm.verify_status(status)
        except:
            return False
        else:
            return True

    config = NFSCorruptConfig(test, params)
    config.setup()
    image_name = os.path.join(config.mnt_dir, 'nfs_corrupt')
    params["image_name_stg"] = image_name
    params["force_create_image_stg"] = "yes"
    params["create_image_stg"] = "yes"
    stg_params = params.object_params("stg")
    env_process.preprocess_image(test, stg_params, image_name)

    vm = env.get_vm(params["main_vm"])
    vm.create(params=params)
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    nfs_devname = get_nfs_devname(params, session)

    # Write disk on NFS server
    write_disk_cmd = "dd if=/dev/urandom of=%s" % nfs_devname
    logging.info("Write disk on NFS server, cmd: %s" % write_disk_cmd)
    session.sendline(write_disk_cmd)
    try:
        # Read some command output, it will timeout
        session.read_up_to_prompt(timeout=30)
    except:
        pass

    try:
        error.context("Make sure guest is running before test")
        vm.resume()
        vm.verify_status("running")

        try:
            cmd = "iptables"
            cmd += " -t filter"
            cmd += " -A INPUT"
            cmd += " -s localhost"
            cmd += " -m state"
            cmd += " --state NEW"
            cmd += " -p tcp"
            cmd += " --dport 2049"
            cmd += " -j REJECT"

            error.context("Reject NFS connection on host")
            utils.system(cmd)

            error.context("Check if VM status is 'paused'")
            if not utils_misc.wait_for(
                    lambda: check_vm_status(vm, "paused"),
                    int(params.get('wait_paused_timeout', 120))):
                raise error.TestError("Guest is not paused after stop NFS")
        finally:
            error.context("Accept NFS connection on host")
            cmd = "iptables"
            cmd += " -t filter"
            cmd += " -D INPUT"
            cmd += " -s localhost"
            cmd += " -m state"
            cmd += " --state NEW"
            cmd += " -p tcp"
            cmd += " --dport 2049"
            cmd += " -j REJECT"

            utils.system(cmd)

        error.context("Continue guest")
        vm.resume()

        error.context("Check if VM status is 'running'")
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"), 20):
            raise error.TestError("Guest does not restore to 'running' status")

    finally:
        session.close()
        vm.destroy(gracefully=True)
        config.cleanup()
Code example #17
def run(test, params, env):
    """
    Qemu multiqueue test for virtio-scsi controller:

    1) Boot up a guest with a virtio-scsi device which supports multi-queue;
       the vcpu and image counts of the guest should match the multi-queue number
    2) Check the multi queue option from monitor
    3) Check device init status in guest
    4) Load I/O in all targets
    5) Check the interrupt queues in guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def proc_interrupts_results(results, irqs_pattern):
        results_dict = {}
        cpu_count = 0
        cpu_list = []
        for line in results.splitlines():
            line = line.strip()
            if re.match("CPU0", line):
                cpu_list = re.findall(r"CPU\d+", line)
                cpu_count = len(cpu_list)
                continue
            if cpu_count > 0:
                irq_key = re.split(":", line)[0]
                if re.findall(irqs_pattern, re.split(":", line)[-1]):
                    results_dict[irq_key] = {}
                    content = line[len(irq_key) + 1:].strip()
                    if len(re.split(r"\s+", content)) < cpu_count:
                        continue
                    count = 0
                    irq_des = ""
                    for irq_item in re.split(r"\s+", content):
                        if count < cpu_count:
                            if count == 0:
                                results_dict[irq_key]["count"] = []
                            results_dict[irq_key]["count"].append(irq_item)
                        else:
                            irq_des += " %s" % irq_item
                        count += 1
                    results_dict[irq_key]["irq_des"] = irq_des.strip()
        if not results_dict:
            test.error("Couldn't find virtio request interrupts from procfs")
        return results_dict, cpu_list

    timeout = float(params.get("login_timeout", 240))
    host_cpu_num = utils_cpu.online_cpus_count()
    while host_cpu_num:
        num_queues = str(host_cpu_num)
        host_cpu_num &= host_cpu_num - 1
    params['smp'] = num_queues
    params['num_queues'] = num_queues
    images_num = int(num_queues)
    extra_image_size = params.get("image_size_extra_images", "512M")
    system_image = params.get("images")
    system_image_drive_format = params.get("system_image_drive_format",
                                           "virtio")
    params["drive_format_%s" % system_image] = system_image_drive_format

    error_context.context("Boot up guest with block devcie with num_queues"
                          " is %s and smp is %s" % (num_queues, params['smp']),
                          logging.info)
    for vm in env.get_all_vms():
        if vm.is_alive():
            vm.destroy()
    for extra_image in range(images_num):
        image_tag = "stg%s" % extra_image
        params["images"] += " %s" % image_tag
        params["image_name_%s" % image_tag] = "images/%s" % image_tag
        params["image_size_%s" % image_tag] = extra_image_size
        params["force_create_image_%s" % image_tag] = "yes"
        image_params = params.object_params(image_tag)
        env_process.preprocess_image(test, image_params, image_tag)

    params["start_vm"] = "yes"
    vm = env.get_vm(params["main_vm"])
    env_process.preprocess_vm(test, params, env, vm.name)
    session = vm.wait_for_login(timeout=timeout)

    if params.get("os_type") == "windows":
        driver_name = params["driver_name"]
        wmi_check_cmd = params["wmi_check_cmd"]
        pattern = params["pattern"]
        session = utils_test.qemu.windrv_check_running_verifier(session, vm, test,
                                                                driver_name, timeout)
        wmi_check_cmd = utils_misc.set_winutils_letter(session, wmi_check_cmd)
        error_context.context("Run wmi check in guest.", logging.info)
        output = session.cmd_output(wmi_check_cmd)
        queue_num = re.findall(pattern, output, re.M)
        try:
            if not queue_num or queue_num[0] != num_queues:
                test.fail("The queue_num from guest is not match with expected.\n"
                          "queue_num from guest is %s, expected is %s"
                          % (queue_num, num_queues))
        finally:
            session.close()
            return
    error_context.context("Check irqbalance service status", logging.info)
    output = session.cmd_output("systemctl status irqbalance")
    if not re.findall("Active: active", output):
        session.cmd("systemctl start irqbalance")
        output = session.cmd_output("systemctl status irqbalance")
        output = utils_misc.strip_console_codes(output)
        if not re.findall("Active: active", output):
            test.cancel("Can not start irqbalance inside guest. "
                        "Skip this test.")

    error_context.context("Pin vcpus to host cpus", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vcpu_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vcpu_num >= len(vm.vcpu_threads):
                break
            vcpu_tid = vm.vcpu_threads[vcpu_num]
            logging.debug("pin vcpu thread(%s) to cpu"
                          "(%s)" % (vcpu_tid,
                                    numa_node.pin_cpu(vcpu_tid)))
            vcpu_num += 1

    error_context.context("Verify num_queues from monitor", logging.info)
    qtree = qemu_qtree.QtreeContainer()
    try:
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
    except AttributeError:
        test.cancel("Monitor deson't supoort qtree skip this test")
    error_msg = "Number of queues mismatch: expect %s"
    error_msg += " report from monitor: %s(%s)"
    scsi_bus_addr = ""
    qtree_num_queues_full = ""
    qtree_num_queues = ""
    for node in qtree.get_nodes():
        if isinstance(node, qemu_qtree.QtreeDev) and (
                node.qtree['type'] == "virtio-scsi-device"):
            qtree_num_queues_full = node.qtree["num_queues"]
            qtree_num_queues = re.search(
                "[0-9]+",
                qtree_num_queues_full).group()
        elif isinstance(node, qemu_qtree.QtreeDev) and node.qtree['type'] == "virtio-scsi-pci":
            scsi_bus_addr = node.qtree['addr']

    if qtree_num_queues != num_queues:
        error_msg = error_msg % (num_queues,
                                 qtree_num_queues,
                                 qtree_num_queues_full)
        test.fail(error_msg)
    if not scsi_bus_addr:
        test.error("Didn't find addr from qtree. Please check the log.")
    error_context.context("Check device init status in guest", logging.info)
    irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts")
    output = session.cmd_output(irq_check_cmd)
    irq_name = params.get("irq_regex")
    prev_irq_results, _ = proc_interrupts_results(output, irq_name)
    logging.debug('The info of interrupts before testing:')
    for irq_watch in prev_irq_results.keys():
        logging.debug('%s : %s %s' % (irq_watch, prev_irq_results[irq_watch]['count'],
                                      prev_irq_results[irq_watch]['irq_des']))

    error_context.context("Pin the interrupters to vcpus", logging.info)
    cpu_select = 1
    for irq_id in prev_irq_results.keys():
        bind_cpu_cmd = "echo %s > /proc/irq/%s/smp_affinity" % \
                       (hex(cpu_select).replace('0x', ''), irq_id)
        cpu_select = cpu_select << 1
        session.cmd(bind_cpu_cmd)

    error_context.context("Load I/O in all targets", logging.info)
    get_dev_cmd = params.get("get_dev_cmd", "ls /dev/[svh]d*")
    output = session.cmd_output(get_dev_cmd)
    system_dev = re.findall(r"/dev/[svh]d\w+(?=\d+)", output)[0]
    dd_timeout = int(re.findall(r"\d+", extra_image_size)[0])
    fill_cmd = ""
    count = 0
    for dev in re.split(r"\s+", output):
        if not dev:
            continue
        if not re.findall(system_dev, dev):
            fill_cmd += " dd of=%s if=/dev/urandom bs=1M " % dev
            fill_cmd += "count=%s oflag=direct &" % dd_timeout
            count += 1
    if count != images_num:
        test.error("Disks are not all show up in system. Output "
                   "from the check command: %s" % output)
    # As Bug 1177332 exists, multi-queue is not supported completely,
    # so performance is not considered here and dd_timeout is longer.
    dd_timeout = dd_timeout * images_num * 2
    session.cmd(fill_cmd, timeout=dd_timeout)

    dd_thread_num = count
    while dd_thread_num:
        time.sleep(5)
        dd_thread_num = session.cmd_output("pgrep -x dd", timeout=dd_timeout)

    error_context.context("Check the interrupt queues in guest", logging.info)
    output = session.cmd_output(irq_check_cmd)
    next_irq_results, cpu_list = proc_interrupts_results(output, irq_name)
    logging.debug('The info of interrupts after testing:')
    for irq_watch in next_irq_results.keys():
        logging.debug('%s : %s %s' % (irq_watch, next_irq_results[irq_watch]['count'],
                                      next_irq_results[irq_watch]['irq_des']))
    irq_bit_map = 0
    for irq_watch in next_irq_results.keys():
        for index, count in enumerate(next_irq_results[irq_watch]["count"]):
            if (int(count) - int(prev_irq_results[irq_watch]["count"][index])) > 0:
                irq_bit_map |= 2 ** index

    error_msg = ""
    cpu_not_used = []
    for index, cpu in enumerate(cpu_list):
        if 2 ** index & irq_bit_map != 2 ** index:
            cpu_not_used.append(cpu)

    if cpu_not_used:
        logging.debug("Interrupt info from procfs:\n%s" % output)
        error_msg = " ".join(cpu_not_used)
        if len(cpu_not_used) > 1:
            error_msg += " are"
        else:
            error_msg += " is"
        error_msg += " not used during test. Please check debug log for"
        error_msg += " more information."
        test.fail(error_msg)
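
The pass/fail decision above is essentially a small bitmap diff: every CPU column whose /proc/interrupts count grew during the dd load sets one bit, and any CPU whose bit stays clear ends up in cpu_not_used. A minimal, self-contained sketch of that logic (find_idle_cpus and the sample dictionaries are illustrative, not part of the test):

def find_idle_cpus(cpu_list, prev_counts, next_counts):
    """Return CPUs whose interrupt counters did not grow during the load."""
    used_bitmap = 0
    for irq in next_counts:
        for index, count in enumerate(next_counts[irq]):
            if int(count) - int(prev_counts[irq][index]) > 0:
                used_bitmap |= 1 << index
    return [cpu for index, cpu in enumerate(cpu_list)
            if not used_bitmap & (1 << index)]


# Example: CPU1 never serviced any of the queue interrupts.
prev = {"virtio0-request.0": [10, 5], "virtio0-request.1": [3, 7]}
after = {"virtio0-request.0": [42, 5], "virtio0-request.1": [9, 7]}
print(find_idle_cpus(["CPU0", "CPU1"], prev, after))   # -> ['CPU1']
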
Code example #18
    def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices):
        """
        Inserts no_disks disks into qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param new_devices: list of device lists; newly added devices are
                            distributed into them in round-robin order
        :type new_devices: list of lists
        :return: (new_devices extended with the added devices, updated params)
        :rtype: tuple(list, virttest.utils_params.Params)
        """
        dev_idx = 0
        _new_devs_fmt = ""
        pci_bus = {'aobject': 'pci.0'}
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        if len(new_devices) == 1:
            strict_mode = None
        else:
            strict_mode = True
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                if i == 0:
                    test.error("Fail to add any disks, probably bad"
                               " configuration.")
                logging.warn(
                    "Can't create desired number '%s' of disk types "
                    "'%s'. Using '%d' no disks.", no_disks, _formats, i)
                break
            name = 'stg%d' % i
            args = {
                'name': name,
                'filename': stg_image_name % i,
                'pci_bus': pci_bus
            }
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            else:
                args['fmt'] = fmt
            args['imgfmt'] = params['image_format_%s' % name] if params.get(
                'image_format_%s' % name) else params['image_format']
            # Other params
            for key, value in param_matrix.items():
                args[key] = random.choice(value)

            try:
                devs = qdev.images_define_by_variables(**args)
                # parallel test adds devices in mixed order, force bus/addrs
                qdev.insert(devs, strict_mode)
            except utils.DeviceError:
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices[dev_idx].extend(devs)
            dev_idx = (dev_idx + 1) % len(new_devices)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Using disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params
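
This helper, and the older variant in the next example, both rely on the same fallback pattern: pick a drive format at random, try to insert the devices, and drop that format from the candidate list when insertion fails so the loop can still make progress with whatever formats remain. A stripped-down, qemu-free sketch of that pattern (InsertError, add_disks and fake_insert are illustrative stand-ins for qdev.insert() and its DeviceError, not real APIs):

import random


class InsertError(Exception):
    """Stands in for the device-insertion failure raised by qdev.insert()."""


def add_disks(no_disks, all_formats, try_insert):
    """Add up to no_disks disks, discarding formats that fail to insert."""
    formats = list(all_formats)      # work on a copy, keep the original intact
    added = []
    while len(added) < no_disks and formats:
        fmt = random.choice(formats)
        try:
            try_insert(fmt)          # may raise InsertError (e.g. bus is full)
        except InsertError:
            formats.remove(fmt)      # this format no longer fits, stop trying it
            continue
        added.append(fmt)
    return added


def fake_insert(fmt):
    if fmt == "lsi_scsi":
        raise InsertError("bus full")


print(add_disks(3, ["virtio", "lsi_scsi"], fake_insert))   # -> ['virtio', 'virtio', 'virtio']
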
Code example #19
    def insert_into_qdev(qdev, param_matrix, no_disks, params):
        """
        Inserts no_disks disks into qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (list of newly added devices, updated params)
        :rtype: tuple(list, virttest.utils_params.Params)
        """
        new_devices = []
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            devs = qdev.images_define_by_variables(**args)
            try:
                for dev in devs:
                    qdev.insert(dev, force=False)
            except qemu_devices.DeviceInsertError:
                # All buses are full, (TODO add bus) or remove this format
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            # TODO: Modify check_disk_params to use vm.devices
            # 1) modify PCI bus to accept full pci addr (02.0, 01.3, ...)
            # 2) add all devices into qemu_devices according to qtree
            # 3) check qtree vs. qemu_devices PCI representation (+children)
            #    (use qtree vs devices, if key and value_qtree == value_devices
            #     match the device and remove it from comparison.
            #     Also use blacklist to remove unnecessary stuff (like
            #     kvmclock, smbus-eeprom, ... from qtree and drive, ... from
            #     devices)
            # => then modify this to use qtree verification
            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices.extend(devs)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Adding disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params
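
The TODO comment in this variant sketches how check_disk_params could eventually be replaced by a qtree-vs-devices comparison: match devices whose shared key/value pairs agree and skip a blacklist of uninteresting entries (kvmclock, smbus-eeprom, ...). A rough, self-contained illustration of that matching idea, working on plain dicts rather than real qtree objects (match_devices and the sample data are assumptions, not existing code):

def match_devices(qtree_devs, qdev_devs, blacklist=("kvmclock", "smbus-eeprom")):
    """Return qtree entries with no qdev counterpart agreeing on all shared keys."""
    remaining = [dev for dev in qdev_devs if dev.get("type") not in blacklist]
    unmatched = []
    for qtree_dev in qtree_devs:
        if qtree_dev.get("type") in blacklist:
            continue
        for candidate in remaining:
            shared = set(qtree_dev) & set(candidate)
            if shared and all(qtree_dev[key] == candidate[key] for key in shared):
                remaining.remove(candidate)   # consumed, compare each device once
                break
        else:
            unmatched.append(qtree_dev)
    return unmatched


qtree = [{"type": "scsi-hd", "addr": "04.0"}, {"type": "kvmclock"}]
qdev = [{"type": "scsi-hd", "addr": "04.0", "drive": "stg0"}]
print(match_devices(qtree, qdev))   # -> [] (everything matched or blacklisted)
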
Code example #20
def run(test, params, env):
    """
    Test to install the guest OS on the lvm device which is created
    on an iSCSI target.
    Steps:
        1) Setup iSCSI initiator on local host.
        2) Discovery and login the above iSCSI target.
        3) Send sg_inq to get information on the host.
        4) Boot guest with this lun as a block device as the second
           disk, with scsi=on,format=raw,werror=stop,rerror=stop.
        5) In the guest, sg_inq should show similar information in
           step 3.
        6) Logout iscsi server.
        7) Check the disk info with sg_inq inside guest, should show
           fail information.
        8) Run dd on this disk, the guest should stop.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    def fetch_sg_info(device, session=None):
        cmd = params['cmd_sg_inq'] % device
        if session:
            return session.cmd_output(cmd)
        return process.getoutput(cmd, 60, ignore_status=False)

    iscsi = Iscsi.create_iSCSI(params, data_dir.get_data_dir())
    try:
        iscsi.login()
        if not utils_misc.wait_for(lambda: iscsi.get_device_name(), 60):
            test.error('Cannot get the iSCSI device.')

        cmd_get_disk_path = params['cmd_get_disk_path']
        disk_path = process.system_output(cmd_get_disk_path, 60,
                                          shell=True).decode()

        host_sg_info = fetch_sg_info(disk_path)
        logging.info('The scsi generic info from host: %s', host_sg_info)

        image_data_tag = params['image_data_tag']
        params['image_name_%s' % image_data_tag] = disk_path
        params['image_size'] = params['emulated_image_size']
        image_params = params.object_params(image_data_tag)
        env_process.preprocess_image(test, image_params, image_data_tag)

        params["start_vm"] = "yes"
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        session = vm.wait_for_login()

        data_disk = '/dev/' + list(get_linux_disks(session).keys()).pop()
        guest_sg_info = fetch_sg_info(data_disk, session)
        logging.info('The scsi generic info from guest: %s', guest_sg_info)

        for info in guest_sg_info.split():
            if info not in host_sg_info:
                test.fail('The guest scsi generic info does not match '
                          'the host.')

        iscsi.logout()
        if params['sg_fail_info'] not in fetch_sg_info(data_disk, session):
            test.fail('Failure information not found after logging out of '
                      'the iSCSI server.')

        session.cmd_output(params['cmd_dd'] % data_disk)
        vm_status_paused = params['vm_status_paused']
        if not utils_misc.wait_for(
                lambda: vm.monitor.verify_status(vm_status_paused), 120, step=3):
            test.fail('The vm status is not %s.' % vm_status_paused)
    finally:
        iscsi.delete_target()
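
The host/guest comparison in this test is deliberately loose: every whitespace-separated token from the guest's sg_inq output must occur somewhere in the host's output, rather than the two reports having to be byte-identical. The same check as a tiny standalone helper (sg_info_matches and the sample strings are illustrative only):

def sg_info_matches(host_info, guest_info):
    """True if every token of the guest sg_inq output appears in the host output."""
    return all(token in host_info for token in guest_info.split())


host = "LIO-ORG   iscsi-lvm   4.0   Peripheral device type: disk"
print(sg_info_matches(host, "LIO-ORG iscsi-lvm"))        # True
print(sg_info_matches(host, "OTHER-VENDOR iscsi-lvm"))   # False
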
Code example #21
File: nfs_corrupt.py  Project: FengYang/virt-test
def run_nfs_corrupt(test, params, env):
    """
    Test if VM paused when image NFS shutdown, the drive option 'werror' should
    be stop, the drive option 'cache' should be none.

    1) Setup NFS service on host
    2) Boot up a VM using another disk on NFS server and write the disk by dd
    3) Check if VM status is 'running'
    4) Reject NFS connection on host
    5) Check if VM status is 'paused'
    6) Accept NFS connection on host and continue VM by monitor command
    7) Check if VM status is 'running'

    @param test: kvm test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    def get_nfs_devname(params, session):
        """
        Get the likely device name of the NFS-backed storage disk in the guest.

        @param params: Test params dictionary.
        @param session: An SSH session object.
        """
        image1_type = params.object_params("image1").get("drive_format")
        stg_type = params.object_params("stg").get("drive_format")
        cmd = ""
        # These 'if' branches should yield the correct 'stg' device name even
        # when the 'stg' image has a different drive type from the main image
        # (called 'image1' in the config file).
        if image1_type == stg_type:
            cmd = "ls /dev/[hsv]d[a-z]"
        elif stg_type == "virtio":
            cmd = "ls /dev/vd[a-z]"
        else:
            cmd = "ls /dev/[sh]d[a-z]"

        cmd += " | tail -n 1"
        return session.cmd_output(cmd).rstrip()


    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        @param vm: VM object.
        @param status: String with desired status.
        @return: True if VM status matches our desired status.
        @return: False if VM status does not match our desired status.
        """
        try:
            vm.verify_status(status)
        except Exception:
            return False
        else:
            return True


    config = NFSCorruptConfig(test, params)
    config.setup()
    image_name = os.path.join(config.mnt_dir, 'nfs_corrupt')
    params["image_name_stg"] = image_name
    params["force_create_image_stg"] = "yes"
    params["create_image_stg"] = "yes"
    stg_params = params.object_params("stg")
    env_process.preprocess_image(test, stg_params, image_name)

    vm = env.get_vm(params["main_vm"])
    vm.create(params=params)
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    nfs_devname = get_nfs_devname(params, session)

    # Write disk on NFS server
    write_disk_cmd = "dd if=/dev/urandom of=%s" % nfs_devname
    logging.info("Write disk on NFS server, cmd: %s" % write_disk_cmd)
    session.sendline(write_disk_cmd)
    try:
        # Read some command output, it will timeout
        session.read_up_to_prompt(timeout=30)
    except Exception:
        pass

    try:
        error.context("Make sure guest is running before test")
        vm.resume()
        vm.verify_status("running")

        try:
            cmd = "iptables"
            cmd += " -t filter"
            cmd += " -A INPUT"
            cmd += " -s localhost"
            cmd += " -m state"
            cmd += " --state NEW"
            cmd += " -p tcp"
            cmd += " --dport 2049"
            cmd += " -j REJECT"

            error.context("Reject NFS connection on host")
            utils.system(cmd)

            error.context("Check if VM status is 'paused'")
            if not utils_misc.wait_for(
                                lambda: check_vm_status(vm, "paused"),
                                int(params.get('wait_paused_timeout', 120))):
                raise error.TestError("Guest is not paused after stop NFS")
        finally:
            error.context("Accept NFS connection on host")
            cmd = "iptables"
            cmd += " -t filter"
            cmd += " -D INPUT"
            cmd += " -s localhost"
            cmd += " -m state"
            cmd += " --state NEW"
            cmd += " -p tcp"
            cmd += " --dport 2049"
            cmd += " -j REJECT"

            utils.system(cmd)

        error.context("Continue guest")
        vm.resume()

        error.context("Check if VM status is 'running'")
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"), 20):
            raise error.TestError("Guest does not restore to 'running' status")

    finally:
        session.close()
        vm.destroy(gracefully=True)
        config.cleanup()
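
The iptables command in this older variant is built by string concatenation; the only difference between blocking and restoring NFS traffic is -A (append) versus -D (delete), which is what the tp-qemu version in the next example factors out into config.iptables_rule_gen(). A hedged sketch of what such a helper might look like (the function below is an assumption for illustration, not the upstream API):

def nfs_reject_rule(action):
    """Build the iptables command that rejects ('A') or re-allows ('D')
    new local TCP connections to the NFS port 2049."""
    if action not in ("A", "D"):
        raise ValueError("action must be 'A' (append) or 'D' (delete)")
    return ("iptables -t filter -%s INPUT -s localhost -m state --state NEW"
            " -p tcp --dport 2049 -j REJECT" % action)


print(nfs_reject_rule("A"))   # install the blocking rule
print(nfs_reject_rule("D"))   # remove it again
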
Code example #22
File: nfs_corrupt.py  Project: zixi-chen/tp-qemu
def run(test, params, env):
    """
    Test if VM paused when image NFS shutdown, the drive option 'werror' should
    be stop, the drive option 'cache' should be none.

    1) Setup NFS service on host
    2) Boot up a VM using another disk on NFS server and write the disk by dd
    3) Check if VM status is 'running'
    4) Reject NFS connection on host
    5) Check if VM status is 'paused'
    6) Accept NFS connection on host and continue VM by monitor command
    7) Check if VM status is 'running'

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_nfs_devname(params, session):
        """
        Get the likely device name of the NFS-backed storage disk in the guest.

        :param params: Test params dictionary.
        :param session: An SSH session object.
        """
        image1_type = params.object_params("image1").get("drive_format")
        stg_type = params.object_params("stg").get("drive_format")
        cmd = ""
        # These 'if' branches should yield the correct 'stg' device name even
        # when the 'stg' image has a different drive type from the main image
        # (called 'image1' in the config file).
        if image1_type == stg_type:
            cmd = "ls /dev/[hsv]d[a-z]"
        elif stg_type == "virtio":
            cmd = "ls /dev/vd[a-z]"
        else:
            cmd = "ls /dev/[sh]d[a-z]"

        cmd += " | tail -n 1"
        return session.cmd_output(cmd).rstrip()

    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        :param vm: VM object.
        :param status: String with desired status.
        :return: True if VM status matches our desired status.
        :return: False if VM status does not match our desired status.
        """
        try:
            vm.verify_status(status)
        except (virt_vm.VMStatusError, qemu_monitor.MonitorLockError):
            return False
        else:
            return True

    error_context.context("Setup NFS Server on local host", logging.info)
    host_ip = utils_net.get_host_ip_address(params)
    try:
        config = NFSCorruptConfig(test, params, host_ip)
        config.setup()
    except NFSCorruptError as e:
        test.error(str(e))
    image_name = os.path.join(config.mnt_dir, "nfs_corrupt")
    params["image_name_stg"] = image_name
    params["force_create_image_stg"] = "yes"
    params["create_image_stg"] = "yes"
    stg_params = params.object_params("stg")

    error_context.context("Boot vm with image on NFS server", logging.info)
    env_process.preprocess_image(test, stg_params, image_name)

    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception:
        config.cleanup()
        test.error("failed to create VM")
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    nfs_devname = get_nfs_devname(params, session)
    # Write disk on NFS server
    error_context.context("Write disk that image on NFS", logging.info)
    write_disk_cmd = "dd if=/dev/zero of=%s oflag=direct" % nfs_devname
    logging.info("dd with command: %s", write_disk_cmd)
    session.sendline(write_disk_cmd)
    try:
        # Read some command output, it will timeout
        session.read_up_to_prompt(timeout=30)
    except Exception:
        pass

    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")

        try:
            error_context.context("Reject NFS connection on host",
                                  logging.info)
            process.system(config.iptables_rule_gen('A'))

            error_context.context("Check if VM status is 'paused'",
                                  logging.info)
            if not utils_misc.wait_for(
                    lambda: check_vm_status(vm, "paused"),
                    int(params.get('wait_paused_timeout', 240))):
                test.error("Guest is not paused after stop NFS")
        finally:
            error_context.context("Accept NFS connection on host",
                                  logging.info)
            process.system(config.iptables_rule_gen('D'))

        error_context.context("Ensure nfs is resumed", logging.info)
        nfs_resume_timeout = int(params.get('nfs_resume_timeout', 240))
        if not utils_misc.wait_for(config.is_mounted_dir_acessible,
                                   nfs_resume_timeout):
            test.error("NFS connection does not resume")

        error_context.context("Continue guest", logging.info)
        vm.resume()

        error_context.context("Check if VM status is 'running'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"), 20):
            test.error("Guest does not restore to 'running' status")

    finally:
        session.close()
        vm.destroy(gracefully=True)
        config.cleanup()
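
In both nfs_corrupt variants, check_vm_status() only turns verify_status() into a boolean so that utils_misc.wait_for() can poll it until it becomes True or the timeout expires. A self-contained sketch of that polling idiom (wait_for_condition is an illustrative stand-in for utils_misc.wait_for, not its actual implementation):

import time


def wait_for_condition(func, timeout, step=1.0):
    """Poll func() until it returns a truthy value or timeout seconds pass."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None


# Example: wait up to 5 seconds for a flag that flips after about 2 seconds.
flip_at = time.time() + 2
print(wait_for_condition(lambda: time.time() >= flip_at, 5, step=0.5))   # True
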