Example #1
def format_disk(test, vm, device, partsize):
    """
    Create a partition on given disk and check it.
    """
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status("ls %s" % device):
        test.fail("Can not find '%s' in guest." % device)
    else:
        if session.cmd_status("which parted"):
            logging.error("Did not find command 'parted' in guest, SKIP...")
            return

    try:
        partition = "%s1" % device
        if session.cmd_status("ls %s" % partition):
            utlv.mk_part(device, size=partsize, session=session)
        utlv.mkfs(partition, "ext4", session=session)
    except Exception as detail:
        test.fail("Create&format partition for '%s' failed: %s"
                  % (device, str(detail)))
    finally:
        logging.debug(session.cmd_output("parted -l"))
        session.close()
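Usage note: a minimal sketch of how format_disk might be driven from an avocado-vt style run() entry point. The parameter names "added_disk_dev" and "added_disk_partsize" and their defaults are illustrative assumptions, not taken from the example above.

def run(test, params, env):
    # Fetch the VM under test from the environment (standard avocado-vt pattern).
    vm = env.get_vm(params.get("main_vm"))
    # Hypothetical cfg parameters; a real test reads them from its cfg file.
    device = params.get("added_disk_dev", "/dev/vdb")
    partsize = params.get("added_disk_partsize", "100M")
    format_disk(test, vm, device, partsize)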
Example #2
    def check_in_vm(vm, target):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                raise error.TestFail(
                    "Failed to query/install parted, make sure"
                    " that you have usable repo in guest")

            if target == "hda":
                target = "sda"
            libvirt.mk_part("/dev/%s" % target, size="10M", session=session)
            libvirt.mkfs("/dev/%s1" % target, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % target)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                session.close()
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False
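Usage note: check_in_vm returns a boolean instead of raising, so the caller decides whether a failed check is fatal. A hedged sketch; verify_attached_disk is a hypothetical wrapper shown only to illustrate how the result is typically consumed.

def verify_attached_disk(test, vm, target):
    # A False return means mount/read/write inside the guest failed.
    if not check_in_vm(vm, target):
        test.fail("Disk check failed in guest for target '%s'" % target)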
Example #5
def fill_null_in_vm(vm, target, size_value=500):
    """
    Fill part of the disk in the VM with data

    :param vm: VM guest
    :param target: disk dev in VM
    :param size_value: number in MiB
    """
    try:
        session = vm.wait_for_login()
        if not utils_package.package_install(["parted"], session, timeout=300):
            logging.error("Failed to install the required 'parted' package")
        device_source = os.path.join(os.sep, 'dev', target)
        libvirt.mk_label(device_source, session=session)
        libvirt.mk_part(device_source,
                        size="%sM" % size_value,
                        session=session)
        # Run partprobe to make the change take effect.
        process.run("partprobe", ignore_status=True, shell=True)
        libvirt.mkfs("/dev/%s1" % target, "ext3", session=session)
        count_number = size_value - 100
        cmd = (
            "mount /dev/%s1 /mnt && dd if=/dev/zero of=/mnt/testfile bs=1024 count=1024x%s "
            " && umount /mnt" % (target, count_number))
        s, o = session.cmd_status_output(cmd)
        logging.info("Check disk operation in VM:\n%s", o)
        session.close()
        if s != 0:
            raise exceptions.TestError(
                "Error happened when executing command:\n%s" % cmd)
    except Exception as e:
        raise exceptions.TestError(str(e))
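Usage note: a short call sketch. With the dd command above, fill_null_in_vm writes size_value - 100 MiB of zeroes, which tests commonly use to drive a filesystem toward full; exercise_fill and the "vdb" target are illustrative.

def exercise_fill(vm):
    # Partition /dev/vdb in the guest, create ext3, and write ~400 MiB
    # of zeroes into it (500 - 100 with the values below).
    fill_null_in_vm(vm, "vdb", size_value=500)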
    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                raise exceptions.TestFail(
                    "Failed to query/install parted, make sure"
                    " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            libvirt.mk_part("/dev/%s" % added_part,
                            size="10M",
                            session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False
Example #7
def make_dasd_fs(path, part, session):
    """
    Erases the disk and creates a new partition with a filesystem

    :param path: the disk path, e.g. /dev/dasda
    :param part: partition path, e.g. /dev/dasda1
    :param session: guest session
    """

    format_dasd(path, session)
    make_dasd_part(path, session)
    mkfs(part, "ext3", session=session)
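Usage note: a minimal call sketch for the s390x DASD helper above. format_dasd and make_dasd_part are assumed to live in the same module, as the function body implies; prepare_dasd is illustrative.

def prepare_dasd(vm):
    session = vm.wait_for_login()
    # DASD naming convention: whole device /dev/dasda, first partition /dev/dasda1.
    make_dasd_fs("/dev/dasda", "/dev/dasda1", session)
    session.close()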
    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False
Example #9
    def prepare_disk(path, disk_format):
        """
        Prepare the disk for a given disk format.
        """
        disk = {}
        # Check if we are testing with a non-existent disk.
        if os.path.split(path)[-1].startswith("notexist."):
            disk.update({"format": disk_format,
                         "source": path})

        elif disk_format == "scsi":
            scsi_option = params.get("virt_disk_device_scsi_option", "")
            disk_source = libvirt.create_scsi_disk(scsi_option)
            if disk_source:
                disk.update({"format": "scsi",
                             "source": disk_source})
            else:
                raise exceptions.TestSkipError("Get scsi disk failed")

        elif disk_format in ["iso", "floppy"]:
            disk_path = libvirt.create_local_disk(disk_format, path)
            disk.update({"format": disk_format,
                         "source": disk_path})
        elif disk_format == "nfs":
            nfs_disk_type = params.get("nfs_disk_type", None)
            disk.update(setup_nfs_disk(os.path.split(path)[-1], nfs_disk_type))

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "2G")
            device_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", device_source)

            # Format the disk and make file system.
            libvirt.mk_label(device_source)
            libvirt.mk_part(device_source)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("%s1" % device_source, "ext3")
            device_source += "1"
            disk.update({"format": disk_format,
                         "source": device_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_size = params.get("virt_disk_device_size", "1")
            device_source = libvirt.create_local_disk(
                "file", path, disk_size, disk_format=disk_format)
            disk.update({"format": disk_format,
                         "source": device_source})

        return disk
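Usage note: a hedged sketch of consuming the returned dict (shown outside its enclosing run() for brevity). The keys match the disk.update() calls above; the path is illustrative.

disk = prepare_disk("/var/lib/libvirt/images/test.qcow2", "qcow2")
if disk:
    # The dict carries just enough information to compose a <disk> element later.
    logging.debug("Prepared disk: format=%s, source=%s",
                  disk["format"], disk["source"])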
    def prepare_disk(path, disk_format):
        """
        Prepare the disk for a given disk format.
        """
        disk = {}
        # Check if we are testing with a non-existent disk.
        if os.path.split(path)[-1].startswith("notexist."):
            disk.update({"format": disk_format,
                         "source": path})

        elif disk_format == "scsi":
            scsi_option = params.get("virt_disk_device_scsi_option", "")
            disk_source = libvirt.create_scsi_disk(scsi_option)
            if disk_source:
                disk.update({"format": "scsi",
                             "source": disk_source})
            else:
                raise error.TestNAError("Get scsi disk failed")

        elif disk_format in ["iso", "floppy"]:
            disk_path = libvirt.create_local_disk(disk_format, path)
            disk.update({"format": disk_format,
                         "source": disk_path})
        elif disk_format == "nfs":
            nfs_disk_type = params.get("nfs_disk_type", None)
            disk.update(setup_nfs_disk(os.path.split(path)[-1], nfs_disk_type))

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "2G")
            device_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", device_source)

            # Format the disk and make file system.
            libvirt.mk_part(device_source)
            # Run partprobe to make the change take effect.
            utils.run("partprobe", ignore_status=True)
            libvirt.mkfs("%s1" % device_source, "ext3")
            device_source += "1"
            disk.update({"format": disk_format,
                         "source": device_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_size = params.get("virt_disk_device_size", "1")
            device_source = libvirt.create_local_disk(
                "file", path, disk_size, disk_format=disk_format)
            disk.update({"format": disk_format,
                         "source": device_source})

        return disk
Example #11
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             test.cancel("Cannot get iscsi device on this"
                         " host: %s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
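Cleanup note: the setup above implies a matching teardown. A hedged sketch of what such a method could look like; cleanup itself is an assumption, and utils_misc.umount mirrors the utils_misc.mount call in __init__.

 def cleanup(self):
     # Undo the disk_lack setup, tolerating a partially completed __init__.
     if getattr(self, "partition", None):
         utils_misc.umount(self.partition, self.mount_dir, self.fs_type)
     if getattr(self, "iscsi_dev", None):
         self.iscsi_dev.cleanup()
     if getattr(self, "selinux_enforcing", False):
         utils_selinux.set_status("enforcing")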
Example #13
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                ret = utils_misc.wait_for(lambda: not process.system(
                    check_cmd, ignore_status=True, shell=True),
                                          timeout=30)
                if not ret:
                    test.fail("Rum command '%s' failed" % check_cmd)
                # This branch covers the nwfilter_vm_start.possitive_test.new_filter.variable_notation case:
                # the matched destination could be an IP address or a hostname.
                if "iptables -L" in check_cmd and expect_match:
                    # IP address that needs to be replaced
                    replace_param = params.get("parameter_value_2")
                    # Get hostname by IP address.
                    hostname_info = None
                    try:
                        hostname_info = socket.gethostbyaddr(replace_param)
                    except socket.error as e:
                        logging.info(
                            "Failed to get hostname from ip address with error: %s",
                            str(e))
                    if hostname_info:
                        # String is used to replace ip address
                        replace_with = "%s|%s" % (replace_param,
                                                  hostname_info[0])
                        expect_match = r"%s" % expect_match.replace(
                            replace_param, replace_with)
                        logging.debug("final iptables match string:%s",
                                      expect_match)
                out = to_text(
                    process.system_output(check_cmd,
                                          ignore_status=False,
                                          shell=True))
                if expect_match and not re.search(expect_match, out):
                    test.fail("'%s' not found in output: %s" %
                              (expect_match, out))

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            cmd = "kill -s TERM `pidof libvirtd`"
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            # After libvirt 5.6.0, libvirtd is using systemd socket activation by default
            if not ret and not libvirt_version.version_compare(5, 6, 0):
                test.fail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True,
                            shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
def run(test, params, env):
    """
    Test disk attachment of multiple disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml: Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type: Disk type.
        :param dev_name: Disk device name.
        :param options: Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = "disk"
        if options.has_key("sgio") and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if options.has_key("driver"):
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if options.has_key("share"):
            if options["share"] == "shareable":
                disk_xml.share = True

        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    disk_source_path = test.virtdir

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in range(2):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)

    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                raise error.TestNAError("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format,
                          "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk("file", disk_path, "1",
                                                    disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format,
                          "source": disk_source})

        # Compose the new domain xml
        vms_list = []
        for i in range(2):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""
            if len(vms_share) > i:
                shareable = vms_share[i]
            disk_xml = get_vm_disk_xml(disk_type, disk_source,
                                       sgio=disk_sgio, share=shareable,
                                       target=disk_target, bus=disk_bus,
                                       driver=disk_driver_options)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({"name": vm_names[i], "vm": vm,
                             "status": "yes" == status_error[i],
                             "disk": disk_xml})
            logging.debug("vms_list %s" % vms_list)

        for i in range(len(vms_list)):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    raise error.TestFail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # If we are testing hotplug, we need to start the domain and
                # then run the virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        raise error.TestFail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        raise error.TestFail('Hotplug disk device unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If we are testing the enospace error policy, only 1 VM is used
                        if error_policy == "enospace":
                            cmd = ("mount /dev/%s /mnt && dd if=/dev/zero of=/mnt/test"
                                   " bs=1M count=2000 2>&1 | grep 'No space left'"
                                   % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm0 exit %s; output: %s", s, o)
                            if 0 != s:
                                raise error.TestFail("Test error_policy %s: cann't see"
                                                     " error messages")
                            session.close()
                            break

                        if session.cmd_status("fdisk -l /dev/%s && mount /dev/%s /mnt; ls /mnt"
                                              % (disk_target, disk_target)):
                            session.close()
                            raise error.TestFail("Test error_policy: "
                                                 "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s "
                                   % (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm1 exit %s; output: %s", s, o)
                            session.close()
                            cmd = ("dd if=/dev/zero of=/mnt/test bs=1M count=100 && dd if="
                                   "/mnt/test of=/dev/null bs=1M;dmesg | grep 'I/O error'")
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if error_policy == "report":
                                if s:
                                    raise error.TestFail("Test error_policy %s: cann't report"
                                                         " error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    raise error.TestFail("Test error_policy %s: error cann't"
                                                         " be ignored" % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    raise error.TestFail("Test error_policy %s: cann't stop"
                                                         " VM" % error_policy)
                            else:
                                logging.error(str(e))
                                raise error.TestFail("Test error_policy %s: login failed"
                                                     % error_policy)

                if test_shareable:
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && echo '%s' "
                                   "> /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM0 failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && grep %s"
                                   " /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM1 failed")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
                            logging.error(str(e))
                            raise error.TestFail("Test disk shareable: login failed")
                session.close()
            except virt_vm.VMStartError:
                if vms_list[i]['status']:
                    raise error.TestFail('VM Failed to start'
                                         ' for some reason!')
Example #15
def check_in_vm(vm, target, old_parts, is_equal=False):
    """
    Check mount/read/write disk in VM.

    :param vm: VM guest.
    :param target: Disk dev in VM.
    :param old_parts: old part partitions
    :param is_equal: whether the new partition list should equal the old one
    :return: True if check successfully.
    """
    try:
        session = vm.wait_for_login()
        rpm_stat, out_put = session.cmd_status_output(
            "rpm -q parted || "
            "yum install -y parted", 300)
        if rpm_stat != 0:
            raise exceptions.TestFail("Failed to query/install parted:\n%s"
                                      % out_put)

        new_parts = utils_disk.get_parts_list(session)
        added_parts = list(set(new_parts).difference(set(old_parts)))
        logging.info("Added parts:%s", added_parts)
        if is_equal:
            if len(added_parts) != 0:
                logging.error("new added parts are not equal the old one")
                return False
            else:
                return True
        if len(added_parts) != 1:
            logging.error("The number of new partitions is invalid in VM")
            return False

        added_part = None
        if target.startswith("vd"):
            if added_parts[0].startswith("vd"):
                added_part = added_parts[0]
        elif target.startswith("hd"):
            if added_parts[0].startswith("sd"):
                added_part = added_parts[0]

        if not added_part:
            logging.error("Can't see added partition in VM")
            return False

        device_source = os.path.join(os.sep, 'dev', added_part)
        libvirt.mk_label(device_source, session=session)
        libvirt.mk_part(device_source, size="10M", session=session)
        # Run partprobe to make the change take effect.
        process.run("partprobe", ignore_status=True, shell=True)
        libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

        cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
               " && cat /mnt/testfile && umount /mnt" % added_part)
        s, o = session.cmd_status_output(cmd)
        logging.info("Check disk operation in VM:\n%s", o)
        session.close()
        if s != 0:
            logging.error("error happened when execute command:\n%s", cmd)
            return False
        return True
    except Exception as e:
        logging.error(str(e))
        return False
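Usage note: old_parts is captured before the disk change, mirroring the get_parts_list call inside the helper; with is_equal=True the helper instead asserts that no new partition appeared. verify_new_disk and the "vdb" target are illustrative.

def verify_new_disk(test, vm):
    session = vm.wait_for_login()
    # Snapshot the partition list before the disk is attached.
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    # ... attach the new disk here (e.g. via virsh attach-device) ...
    if not check_in_vm(vm, "vdb", old_parts):
        test.fail("New disk is not usable inside the guest")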
def run(test, params, env):
    """
    Test disk attachment of multiple disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml: Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type: Disk type.
        :param dev_name: Disk device name.
        :param options: Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        if "sgio" in options and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if "driver" in options:
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if "share" in options:
            if options["share"] == "shareable":
                disk_xml.share = True

        if "readonly" in options:
            if options["readonly"] == "readonly":
                disk_xml.readonly = True

        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    vms_readonly = params.get("virt_disk_vms_readonly", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    test_readonly = "yes" == params.get("virt_disk_test_readonly", "no")
    disk_source_path = data_dir.get_data_dir()
    disk_path = ""
    tmp_filename = "cdrom_te.tmp"
    tmp_readonly_file = ""

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in list(range(2)):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)
    # Initialize VM list
    vms_list = []
    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                test.cancel("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_label(disk_source)
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format,
                          "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk("file", disk_path, "1",
                                                    disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format,
                          "source": disk_source})

        if disk_device == "cdrom":
            tmp_readonly_file = "/root/%s" % tmp_filename
            with open(tmp_readonly_file, 'w') as f:
                f.write("teststring\n")
            disk_path = "%s/test.iso" % disk_source_path
            disk_source = libvirt.create_local_disk("iso", disk_path, "1")
            disks.append({"source": disk_source})

        # Compose the new domain xml
        for i in list(range(2)):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""

            # The lock feature was introduced in libvirt 3.9.0, so the disk
            # shareable attribute must be set for both VMs to start
            # successfully when they share the same disk.
            if test_error_policy and libvirt_version.version_compare(3, 9, 0):
                vms_share = ["shareable", "shareable"]
            if len(vms_share) > i:
                shareable = vms_share[i]
            readonly = ""
            if len(vms_readonly) > i:
                readonly = vms_readonly[i]
            disk_xml = get_vm_disk_xml(disk_type, disk_source,
                                       sgio=disk_sgio, share=shareable,
                                       target=disk_target, bus=disk_bus,
                                       driver=disk_driver_options,
                                       disk_device=disk_device,
                                       readonly=readonly)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({"name": vm_names[i], "vm": vm,
                             "status": "yes" == status_error[i],
                             "disk": disk_xml})
            logging.debug("vms_list %s" % vms_list)

        for i in list(range(len(vms_list))):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    test.fail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # If we are testing hotplug, we need to start the domain and
                # then run the virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml,
                                                 debug=True).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        test.fail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        test.fail('Hotplug disk device unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If we are testing the enospace error policy, only 1 VM is used
                        if error_policy == "enospace":
                            cmd = ("mount /dev/%s /mnt && dd if=/dev/zero of=/mnt/test"
                                   " bs=1M count=2000 2>&1 | grep 'No space left'"
                                   % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm0 exit %s; output: %s", s, o)
                            if 0 != s:
                                test.fail("Test error_policy %s: cann't see"
                                          " error messages")
                            session.close()
                            break

                        if session.cmd_status("fdisk -l /dev/%s && mount /dev/%s /mnt; ls /mnt"
                                              % (disk_target, disk_target)):
                            session.close()
                            test.fail("Test error_policy: "
                                      "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s "
                                   % (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm1 exit %s; output: %s", s, o)
                            session.close()
                            cmd = ("dd if=/dev/zero of=/mnt/test bs=1M count=100 && dd if="
                                   "/mnt/test of=/dev/null bs=1M;dmesg | grep 'I/O error'")
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if error_policy == "report":
                                process.run("rm -rf %s" % disk_source, ignore_status=False, shell=True)
                                vms_list[0]['vm'].destroy(gracefully=False)

                                def _check_error():
                                    cmd_result = virsh.domblkerror(vms_list[0]['name'])
                                    return 'Segmentation fault' in cmd_result.stdout_text.strip()
                                status = utils_misc.wait_for(_check_error, timeout=90)
                                if not status:
                                    test.fail("Test error_policy %s: cann't report"
                                              " error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    test.fail("Test error_policy %s: error cann't"
                                              " be ignored" % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    test.fail("Test error_policy %s: cann't stop"
                                              " VM" % error_policy)
                            else:
                                logging.error(str(e))
                                test.fail("Test error_policy %s: login failed"
                                          % error_policy)

                if test_shareable:
                    # Check shared file selinux label with type and MCS as
                    # svirt_image_t:s0
                    if disk_path:
                        if not utils_selinux.check_context_of_file(disk_path, "svirt_image_t:s0"):
                            test.fail("Context of shared iso is not expected.")

                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && echo '%s' "
                                   "> /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if s:
                                test.fail("Test disk shareable on VM0 failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && grep %s"
                                   " /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                test.fail("Test disk shareable on VM1 failed")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
                            logging.error(str(e))
                            test.fail("Test disk shareable: login failed")

                if test_readonly:
                    # Check shared file selinux label with type and MCS as
                    # virt_content_t:s0
                    if disk_path:
                        if not utils_selinux.check_context_of_file(disk_path, "virt_content_t:s0"):
                            test.fail("Context of shared iso is not expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to read on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = "mount -o ro /dev/cdrom /mnt && grep "
                            cmd += "%s /mnt/%s" % (test_str, tmp_filename)
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            session0.close()
                            if s:
                                test.fail("Test file not found in VM0 cdrom")
                            # Try to read on vm1.
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                test.fail("Test file not found in VM1 cdrom")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
                            logging.error(str(e))
                            test.fail("Test disk shareable: login failed")
                session.close()
            except virt_vm.VMStartError as start_error:
                if vms_list[i]['status']:
                    test.fail("VM failed to start."
                              "Error: %s" % str(start_error))
    finally:
        # Stop VMs.
        for i in range(len(vms_list)):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)

        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()

        # Remove disks.
        for img in disks:
            if 'format' in img:
                if img["format"] == "scsi":
                    utils_misc.wait_for(libvirt.delete_scsi_disk,
                                        120, ignore_errors=True)
                elif img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
            elif "source" in img:
                os.remove(img["source"])

        if tmp_readonly_file:
            if os.path.exists(tmp_readonly_file):
                os.remove(tmp_readonly_file)
def run(test, params, env):
    """
    Test pool command: virsh pool_autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target == "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name, pool_type, checkpoint,
                   expect_value="", expect_error=False):
        """
        Check the pool after autostarting it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, whether the command is expected to fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
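        # pool_state()/pool_autostart() below are expected to return the pool's
        # 'State' and 'Autostart' columns (e.g. "active"/"inactive" and
        # "yes"/"no"), which is what expect_value is compared against.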
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    error_msg = "Dir pool should be always active when libvirtd restart. "
                    error_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.error(error_msg)
                test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint, actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            # Remove the partition for disk pool
            # Sometimes a leftover partition causes pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type, size="0.5",
                                                    vgname=vg_name, lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                if not virsh.pool_undefine(pool_name):
                    test.fail("Undefine pool %s failed" % pool_name)
                if not virsh.pool_define(p_xml):
                    test.fail("Define pool %s from %s failed" % (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name, ignore_status=True, debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall("//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, readonly=ro_flag,
                                      ignore_status=True, debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="yes", expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name, pool_type, checkpoint="State",
                       expect_value="active", expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool, extra="--disable", debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name, pool_type, checkpoint='Autostart',
                           expect_value="no", expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name, pool_type, checkpoint='State',
                           expect_value="inactive", expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, **kwargs)
            if new_device:
                utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
def run(test, params, env):
    """
    Test pool command: virsh pool_autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target == "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name,
                   pool_type,
                   checkpoint,
                   expect_value="",
                   expect_error=False):
        """
        Check the pool after autostarting it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, whether the command is expected to fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    debug_msg = "Dir pool should be always active when libvirtd restart. "
                    debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.debug(debug_msg)
                else:
                    test.fail("Pool %s isn't %s as expected" %
                              (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint,
                              actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target
    }
    params.update(kwargs)
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(**params)
            # Remove the partition for disk pool
            # Sometimes a leftover partition causes pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type,
                                                    size="0.5",
                                                    vgname=vg_name,
                                                    lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                if not virsh.pool_undefine(pool_name):
                    test.fail("Undefine pool %s failed" % pool_name)
                if not virsh.pool_define(p_xml):
                    test.fail("Define pool %s from %s failed" %
                              (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name,
                                          ignore_status=True,
                                          debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall(
                            "//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool,
                                      readonly=ro_flag,
                                      ignore_status=True,
                                      debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name,
                       pool_type,
                       checkpoint='Autostart',
                       expect_value="yes",
                       expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name,
                       pool_type,
                       checkpoint="State",
                       expect_value="active",
                       expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool,
                                          extra="--disable",
                                          debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name,
                           pool_type,
                           checkpoint='Autostart',
                           expect_value="no",
                           expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name,
                           pool_type,
                           checkpoint='State',
                           expect_value="inactive",
                           expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(**params)
            if new_device:
                utlv.delete_local_disk(disk_type,
                                       vgname=vg_name,
                                       lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
Example #19
0
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")
    need_vm2 = "yes" == params.get("need_vm2", "no")
    add_vm_name = params.get("add_vm_name", "vm2")
    vms = [vm]
    dst_outside = params.get("dst_outside", "www.google.com")
    ping_timeout = int(params.get("ping_timeout", "10"))

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        if params_dict['value'] == "MAC_of_virbr0":
            virbr0_info = process.run("ip a | grep virbr0: -A1",
                                      shell=True).stdout_text.strip()
            virbr0_mac = re.search(
                r'link/ether\s+(\w{2}:\w{2}:\w{2}:\w{2}:\w{2}:\w{2})',
                virbr0_info, re.M | re.I).group(1)
            params_dict['value'] = virbr0_mac
            logging.debug("params_dict['value'] is %s " % params_dict['value'])
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list
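    # Generate a fresh UUID for the new filter; presumably consumed by
    # utlv.create_nwfilter_xml(params) below when the filter XML is built.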
    params['filter_uuid'] = process.run("uuidgen",
                                        ignore_status=True,
                                        shell=True).stdout_text.strip()

    # Get all the check commands and corresponding expected results from the config file and make a dictionary
    cmd_list_ = params.get('check_cmd', '')
    if cmd_list_:
        cmd_list = cmd_list_.split(',')
        expect_res = params.get('expect_match', '').split(',')
        logging.debug("cmd_list is %s" % cmd_list)
        logging.debug("expect_res is %s" % expect_res)
        cmd_result_dict = dict(zip(cmd_list, expect_res))
        logging.debug("the check dict is %s" % cmd_result_dict)
    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd("virtqemud")
    device_name = None

    def check_nwfilter_rules(check_cmd, expect_match):
        """"check the nwfilter corresponding rule is added by iptables commands"""
        ret = utils_misc.wait_for(lambda: not process.system(
            check_cmd, ignore_status=True, shell=True),
                                  timeout=30)
        if not ret:
            test.fail("Rum command '%s' failed" % check_cmd)
        # This matters for the nwfilter_vm_start.possitive_test.new_filter.variable_notation
        # case: the matched destination could be an IP address or a hostname
        if "iptables -L" in check_cmd and expect_match and 'ACCEPT' in expect_match:
            # ip address that need to be replaced
            replace_param = params.get("parameter_value_2")
            # Get hostname by ip address.
            hostname_info = None
            try:
                hostname_info = socket.gethostbyaddr(replace_param)
            except socket.error as e:
                logging.info(
                    "Failed to get hostname from ip address with error: %s",
                    str(e))
            if hostname_info:
                # String is used to replace ip address
                replace_with = "%s|%s" % (replace_param, hostname_info[0])
                expect_match = r"%s" % expect_match.replace(
                    replace_param, replace_with)
                logging.debug("final iptables match string:%s", expect_match)
        out = astring.to_text(
            process.system_output(check_cmd, ignore_status=False, shell=True))
        if expect_match and not re.search(expect_match, out):
            test.fail("'%s' not found in output: %s" % (expect_match, out))

    def clean_up_dirty_nwfilter_binding():
        cmd_result = virsh.nwfilter_binding_list(debug=True)
        binding_list = cmd_result.stdout_text.strip().splitlines()
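        # The first two lines of 'virsh nwfilter-binding-list' output are the
        # column header and the separator line; skip them.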
        binding_list = binding_list[2:]
        result = []
        # If binding list is not empty.
        if binding_list:
            for line in binding_list:
                # Split on whitespace and keep the first column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        logging.info("nwfilter binding list is: %s", result)
        for binding_uuid in result:
            try:
                virsh.nwfilter_binding_delete(binding_uuid)
            except Exception as e:
                logging.error(
                    "Exception thrown while undefining nwfilter-binding: %s",
                    str(e))
                raise

    try:
        # Clean up dirty nwfilter bindings if there are any.
        clean_up_dirty_nwfilter_binding()
        rule = params.get("rule")
        if rule:
            # Pre-check that at least one nwfilter exists, since
            # utlv.create_nwfilter_xml will fail if none exist
            nwfilter_list = libvirt_nwfilter.get_nwfilter_list()
            if not nwfilter_list:
                test.error("There is no any nwfilter existed on the host")
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            iface_mac = iface_xml.mac_address
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if need_vm2:
                # Clone more vm for testing
                result = virsh.dom_list('--inactive').stdout_text
                if add_vm_name in result:
                    logging.debug("%s is already exists!" % add_vm_name)
                    vms.append(env.get_vm(add_vm_name))
                else:
                    vm.destroy()
                    ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                                add_vm_name,
                                                                True,
                                                                timeout=360)
                    if ret_clone.exit_status:
                        test.fail("Error when clone a second vm!")
                    vms.append(vm.clone(add_vm_name))
                    vm.start()
                vm2 = vms[1]
                logging.debug("Now the vms is: %s", [dom.name for dom in vms])
                # update the vm2 interface with the nwfilter
                logging.debug("filter_params_list is %s" % filter_param_list)
                iface_dict = {
                    "filter": filter_name,
                    "filter_parameters": filter_param_list,
                    "del_mac": True
                }
                if vm2.is_alive():
                    vm2.destroy()
                utlv.modify_vm_iface(vm2.name, "update_iface", iface_dict)
                vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
                iface_xml = vmxml.get_devices('interface')[0]
                logging.debug("iface_xml for vm2 is %s" % iface_xml)
                vm2.start()
                vm2_session = vm2.wait_for_serial_login()
                vm2_mac = vm2.get_mac_address()
                vm2_ip = utils_net.get_guest_ip_addr(vm2_session, vm2_mac)
                vm.session = vm.wait_for_serial_login()
                # Test network functions: the two VMs should not be able to reach each other
                gateway_ip = utils_net.get_ip_address_by_interface("virbr0")
                status1, output1 = utils_net.ping(dest=vm2_ip,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                status2, output2 = utils_net.ping(dest=gateway_ip,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                status3, output3 = utils_net.ping(dest=dst_outside,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                if not status1:
                    test.fail(
                        "vm with clean-traffic-gateway succeeded to ping %s %s,"
                        " but this is not expected!"
                        % (vm2.name, vm2_ip))
                if status2 or status3:
                    test.fail("vm ping failed! check %s \n %s" %
                              (output2, output3))
            if cmd_list_:
                loop = 0
                for check_cmd_, expect_match_ in cmd_result_dict.items():
                    check_cmd = check_cmd_.strip()
                    expect_match = expect_match_.strip()
                    if "DEVNAME" in check_cmd:
                        check_cmd = check_cmd.replace("DEVNAME", iface_target)
                    if "VMMAC" in expect_match:
                        expect_match = expect_match.replace("VMMAC", iface_mac)
                    logging.debug(
                        "the check_cmd is %s, and expected result is %s" %
                        (check_cmd, expect_match))
                    check_nwfilter_rules(check_cmd, expect_match)
                    loop += 1
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            daemon_name = libvirtd.service_name
            pid = process.run('pidof %s' % daemon_name,
                              shell=True).stdout_text.strip()
            cmd = "kill -s TERM %s" % pid
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            # After libvirt 5.6.0, libvirtd is using systemd socket activation by default
            if not ret and not libvirt_version.version_compare(5, 6, 0):
                test.fail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter except clean-traffic as it is built-in nwfilter
        if filter_name != exist_filter and filter_name != 'clean-traffic':
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True,
                            shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
        # Remove additional vms
        if need_vm2:
            result = virsh.dom_list("--all").stdout_text
            if add_vm_name in result:
                virsh.remove_domain(add_vm_name, "--remove-all-storage")
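
# For orientation, a minimal sketch of the interface <filterref> element that
# filterref_dict above is rendered into; the filter name and the 'IP' parameter
# are illustrative values, not taken from the test configuration:
#
#   <interface type='network'>
#     ...
#     <filterref filter='testcase'>
#       <parameter name='IP' value='10.0.0.1'/>
#     </filterref>
#   </interface>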
Example #20
0
def run(test, params, env):
    """
    Test disk attachment of multiple disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml. Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type. Disk type.
        :param dev_name. Disk device name.
        :param options. Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = "disk"
        if options.has_key("sgio") and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"], 'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {
                disk_attr: dev_name
            }})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if options.has_key("driver"):
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if options.has_key("share"):
            if options["share"] == "shareable":
                disk_xml.share = True

        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    disk_source_path = test.virtdir

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in range(2):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)

    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                raise error.TestNAError("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                         is_login=True,
                                                         image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format, "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk("file",
                                                    disk_path,
                                                    "1",
                                                    disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format, "source": disk_source})

        # Compose the new domain xml
        vms_list = []
        for i in range(2):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""
            if len(vms_share) > i:
                shareable = vms_share[i]
            disk_xml = get_vm_disk_xml(disk_type,
                                       disk_source,
                                       sgio=disk_sgio,
                                       share=shareable,
                                       target=disk_target,
                                       bus=disk_bus,
                                       driver=disk_driver_options)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({
                "name": vm_names[i],
                "vm": vm,
                "status": "yes" == status_error[i],
                "disk": disk_xml
            })
            logging.debug("vms_list %s" % vms_list)

        for i in range(len(vms_list)):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    raise error.TestFail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # If we are testing hotplug, we need to start the domain and
                # then run the virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(
                        vms_list[i]['name'],
                        vms_list[i]['disk'].xml).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        raise error.TestFail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        raise error.TestFail(
                            'Disk device hotplugged unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If we are testing the enospace error policy, only one VM is used
                        if error_policy == "enospace":
                            cmd = (
                                "mount /dev/%s /mnt && dd if=/dev/zero of=/mnt/test"
                                " bs=1M count=2000 2>&1 | grep 'No space left'"
                                % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug(
                                "error_policy in vm0 exit %s; output: %s", s,
                                o)
                            if 0 != s:
                                raise error.TestFail(
                                    "Test error_policy %s: can't see"
                                    " error messages" % error_policy)
                            session.close()
                            break

                        if session.cmd_status(
                                "fdisk -l /dev/%s && mount /dev/%s /mnt; ls /mnt"
                                % (disk_target, disk_target)):
                            session.close()
                            raise error.TestFail("Test error_policy: "
                                                 "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(
                                timeout=10)
                            cmd = (
                                "fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s " %
                                (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug(
                                "error_policy in vm1 exit %s; output: %s", s,
                                o)
                            session.close()
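                            # Read/write the disk from vm0 after vm1 has
                            # reformatted it, then grep dmesg for I/O errors to
                            # see how the configured error_policy reacts.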
                            cmd = (
                                "dd if=/dev/zero of=/mnt/test bs=1M count=100 && dd if="
                                "/mnt/test of=/dev/null bs=1M;dmesg | grep 'I/O error'"
                            )
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s",
                                          s, o)
                            if error_policy == "report":
                                if s:
                                    raise error.TestFail(
                                        "Test error_policy %s: can't report"
                                        " error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    raise error.TestFail(
                                        "Test error_policy %s: error can't"
                                        " be ignored" % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError,
                                aexpect.ShellError) as e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    raise error.TestFail(
                                        "Test error_policy %s: can't stop"
                                        " VM" % error_policy)
                            else:
                                logging.error(str(e))
                                raise error.TestFail(
                                    "Test error_policy %s: login failed" %
                                    error_policy)

                if test_shareable:
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(
                                timeout=10)
                            cmd = (
                                "fdisk -l /dev/%s && mount /dev/%s /mnt && echo '%s' "
                                "> /mnt/test && umount /mnt" %
                                (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s",
                                          s, o)
                            if s:
                                raise error.TestFail(
                                    "Test disk shareable on VM0 failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = (
                                "fdisk -l /dev/%s && mount /dev/%s /mnt && grep %s"
                                " /mnt/test && umount /mnt" %
                                (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s",
                                          s, o)
                            if s:
                                raise error.TestFail(
                                    "Test disk shareable on VM1 failed")
                        except (remote.LoginError, virt_vm.VMError,
                                aexpect.ShellError) as e:
                            logging.error(str(e))
                            raise error.TestFail(
                                "Test disk shareable: login failed")
                session.close()
            except virt_vm.VMStartError:
                if vms_list[i]['status']:
                    raise error.TestFail('VM failed to start'
                                         ' for some reason!')
Example #21
0
def run(test, params, env):
    """
    Test command: domfsinfo [--domain]

    The command gets information of domain's mounted filesystems.
    """
    start_vm = ("yes" == params.get("start_vm", "yes"))
    start_ga = ("yes" == params.get("start_ga", "yes"))
    prepare_channel = ("yes" == params.get("prepare_channel", "yes"))
    status_error = ("yes" == params.get("status_error", "no"))
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_dir = params.get("mount_dir", None)
    quiet_mode = ("yes" == params.get("quiet_mode", False))
    readonly_mode = ("yes" == params.get("readonly_mode", False))
    nfs_mount = ("yes" == params.get("nfs_mount", False))
    domfsfreeze = ("yes" == params.get("domfsfreeze", False))

    # Hotplug and Unplug options
    hotplug_unplug = ("yes" == params.get("hotplug_unplug", False))
    disk_name = params.get("disk_name", "test")
    disk_path = os.path.join(data_dir.get_tmp_dir(), disk_name)
    disk_target = params.get("disk_target", "vdb")
    fs_type = params.get("fs_type", "ext3")
    new_part = ""

    fail_pat = []
    check_point_msg = params.get("check_point_msg", "")
    if check_point_msg:
        for msg in check_point_msg.split(";"):
            fail_pat.append(msg)

    def hotplug_domain_disk(domain, target, source=None, hotplug=True):
        """
        Hot-plug/Hot-unplug disk for domain

        :param domain: Guest name
        :param source: Source of disk device; may be None when hotplug=False
        :param target: Target of disk device
        :param hotplug: True means hotplug, False means hot-unplug
        :return: Virsh command object
        """
        if hotplug:
            result = virsh.attach_disk(domain,
                                       source,
                                       target,
                                       "--live",
                                       ignore_status=False,
                                       debug=True)
        else:
            session = vm.wait_for_login()
            try:
                session.cmd("umount %s" % mount_dir)
                session.close()
            except Exception:
                test.error(
                    "Failed to unmount the disk before unplugging it")
            result = virsh.detach_disk(domain,
                                       target,
                                       "--live",
                                       ignore_status=False,
                                       debug=True)
        # The attachment needs a few seconds to take effect
        time.sleep(5)
        return result

    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cleanup_nfs = False
    try:
        reset_kwargs = {
            "start_vm": start_vm,
            "start_ga": start_ga,
            "prepare_channel": prepare_channel
        }
        reset_domain(vm, **reset_kwargs)

        if domfsfreeze:
            result = virsh.domfsfreeze(vm_name, debug=True)
            if result.exit_status:
                test.fail("Failed to execute virsh.domfsfreeze:\n%s" %
                          result.stderr)
        if nfs_mount:
            nfs_device = libvirt.setup_or_cleanup_nfs(True,
                                                      mount_dir=mount_dir,
                                                      is_mount=True)
            if nfs_device:
                cleanup_nfs = True
        if hotplug_unplug:
            session = vm.wait_for_login()
            new_device = libvirt.create_local_disk("file",
                                                   path=disk_path,
                                                   size="1")
            parts_list_before_attach = utils_disk.get_parts_list(session)
            hotplug_domain_disk(vm_name, disk_target, new_device)
            parts_list_after_attach = utils_disk.get_parts_list(session)
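            # The hot-plugged disk is whichever device appears in the
            # partition list after the attach but not before.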
            new_part = list(
                set(parts_list_after_attach).difference(
                    set(parts_list_before_attach)))[0]
            logging.debug("The new partition is %s", new_part)
            libvirt.mkfs("/dev/%s" % new_part, fs_type, session=session)
            session.cmd_status(
                "mkdir -p {0} ; mount /dev/{1} {0}; ls {0}".format(
                    mount_dir, new_part))
            session.close()

        # Run test case
        command_dargs = {
            "readonly": readonly_mode,
            "quiet": quiet_mode,
            "debug": True
        }
        result = virsh.domfsinfo(vm_name, **command_dargs)
        if not result.exit_status:
            if fail_pat:
                test.fail("Expected fail with %s, but run succeed:\n%s" %
                          (fail_pat, result))
        else:
            if not fail_pat:
                test.fail("Expected success, but run failed:\n%s" % result)
            else:
                # Fail unless at least one expected failure pattern matches result.stderr
                if not any(p in result.stderr for p in fail_pat):
                    test.fail(
                        "Expected fail with one of %s, but failed with:\n%s" %
                        (fail_pat, result))
        # Check virsh.domfsinfo output
        cmd_output = result.stdout.strip()
        if quiet_mode:
            head_pat = "Mountpoint\s+Name\s+Type\s+Target"
            check_output(cmd_output, head_pat, test, expected=False)
        elif nfs_mount:
            check_output(cmd_output, mount_dir, test, expected=False)
        elif hotplug_unplug:
            blk_target = re.findall(r'[a-z]+', new_part)[0]
            disk_pat = "%s\s+%s\s+%s\s+%s" % (mount_dir, new_part, fs_type,
                                              blk_target)
            check_output(cmd_output, disk_pat, test, expected=True)
            # Unplug domain disk
            hotplug_domain_disk(vm_name, target=new_part, hotplug=False)
            result = virsh.domfsinfo(vm_name, **command_dargs)
            if result.exit_status:
                test.fail(
                    "Failed to run virsh.domfsinfo after disk unplug:\n%s" %
                    result.stderr)
            check_output(result.stdout.strip(), disk_pat, test, expected=False)
        else:
            # Verify virsh.domfsinfo consistency
            if not status_error:
                session = vm.wait_for_login(timeout=120)
                domfsinfo = vm.domfsinfo()
                expected_result = get_mount_fs(session)
                if domfsinfo and expected_result:
                    check_domfsinfo(domfsinfo, expected_result, test)
                else:
                    logging.debug("Virsh.domfsinfo output:\n%s", domfsinfo)
                    logging.debug("Expected_result is:\n%s", expected_result)
                    test.error("Command output inconsistent with expected")
                session.close()
    finally:
        if cleanup_nfs:
            libvirt.setup_or_cleanup_nfs(False, mount_dir=mount_dir)
        if vm.is_alive():
            vm.destroy()
        if hotplug_unplug:
            if disk_path:
                cmd = "rm -rf %s" % disk_path
                process.run(cmd)
        vmxml_backup.sync()
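
# For reference, the 'virsh domfsinfo <domain>' output parsed above has the
# following shape (values are illustrative):
#
#   Mountpoint   Name   Type   Target
#   --------------------------------------
#   /            vda1   xfs    vda
#   /mnt         vdb1   ext3   vdb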
Example #22
0
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if "parameter_name_" in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict["name"] = params[params_key[i]]
        params_dict["value"] = params["parameter_value_%s" % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict["name"] = filter_name
    filterref_dict["parameters"] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices("interface")[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface("network")
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, "ext4")
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            utils.run(cmd)

        if ipset_command:
            try:
                os_dep.command("ipset")
            except ValueError:
                ret = utils.run("yum install ipset -y")
                if ret.exit_status:
                    raise error.TestNAError("Can't install ipset on host")
            utils.run(ipset_command)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices("interface")[0]
            iface_target = iface_xml.target["dev"]
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                ret = utils_misc.wait_for(lambda: not utils.system(check_cmd, ignore_status=True), timeout=30)
                if not ret:
                    raise error.TestFail("Rum command '%s' failed" % check_cmd)
                out = utils.system_output(check_cmd, ignore_status=False)
                if expect_match and not re.search(expect_match, out):
                    raise error.TestFail("'%s' not found in output: %s" % (expect_match, out))

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case.\n error:" " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            cmd = "kill -SIGTERM `pidof libvirtd`"
            utils.run(cmd)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(), timeout=30)
            if not ret:
                raise error.TestFail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                utils.run("umount -l %s" % device_name, ignore_status=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            utils.run("ipset destroy blacklist")
Example #23
0
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                ret = utils_misc.wait_for(lambda: not
                                          process.system(check_cmd,
                                                         ignore_status=True,
                                                         shell=True),
                                          timeout=30)
                if not ret:
                    test.fail("Rum command '%s' failed" % check_cmd)
                out = to_text(process.system_output(check_cmd, ignore_status=False, shell=True))
                if expect_match and not re.search(expect_match, out):
                    test.fail("'%s' not found in output: %s"
                              % (expect_match, out))

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            cmd = "kill -s TERM `pidof libvirtd`"
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            if not ret:
                test.fail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name, ignore_status=True, shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
Example #25
0
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the VM object corresponding to the domain
        :param events_list: names of the events to trigger, in order
        :return: the expected output that the virsh event command prints out
        """
        if events_list is None:
            events_list = []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Domain name: %s", dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        dest_path = os.path.join(data_dir.get_data_dir(), "copy")

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define',
                             'undefine', 'crash', 'device-removal-failed',
                             'watchdog', 'io-error']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu-2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, new_disk, target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "blockcommit":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs)
                    snapshot_path = dom.get_blk_devices()['vda']['source']
                    virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % snapshot_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % disk_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda completed")
                    os.unlink(snapshot_path)
                elif event == "blockcopy":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    dom.undefine()
                    virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % disk_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % dest_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda completed")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name, dimm_xml.xml,
                                        flagstr="--config", **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                    expected_events_list.append("'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # The watchdog acts slowly; wait for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                    if action == 'pause':
                        expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail("Domain state should still be paused due to I/O error!")
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
            if os.path.exists(dest_path):
                os.unlink(dest_path)
        return [(dom.name, event) for event in expected_events_list]
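trigger_events returns (domain name, pattern) tuples in which most patterns still carry a '%s' placeholder for the name. A hedged sketch of how a caller outside this excerpt might expand them and match against captured virsh event output; `vm`, `event_output`, and `test` are assumed to come from the surrounding test, and entries that embed regex fragments such as '.*' would need re.search instead of a substring check.

# Hypothetical checking loop; the real matching code is not shown here.
expected = trigger_events(vm, ['suspend', 'resume'])
for dom_name, pattern in expected:
    # Most patterns embed the domain name via a single '%s'.
    expanded = pattern.replace("%s", dom_name, 1)
    if expanded not in event_output:
        test.fail("Expected event not found: %s" % expanded)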