Example #1
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """
    def make_disk_snapshot():
        # Make four external snapshots for disks only
        for count in range(1, 5):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as the external snapshot file format, so
                # update it here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0], count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs
                      and disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0], count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs
                      or 'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block'
                            or disk_src_protocol == 'iscsi'):
                        # Use a local file as the external snapshot target for
                        # block and iscsi network disk types.
                        # A block device is treated as raw format by default,
                        # so it is not suitable as an external disk snapshot
                        # target; the workaround is to run qemu-img on the
                        # target again.
                        # External active snapshots are also not supported on
                        # 'network' disks using the 'iscsi' protocol.
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise error.TestFail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
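            # (each flag lands in a different external overlay, so after the
            # pull every flag must still be readable from the merged image;
            # see the check at the end of the test)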
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                raise error.TestNAError("Snapshot on glusterfs not"
                                        " support in current "
                                        "version. Check more info "
                                        " with https://bugzilla.re"
                                        "dhat.com/show_bug.cgi?id="
                                        "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to contain the OS.
        # We will perform the blockpull operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
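        # Since libvirt 1.2.4, virsh accepts the "<target>[<index>]" notation,
        # where the index refers to the <backingstore index='N'> element in the
        # live disk XML, so the base can be given without its file path.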
        if (libvirt_version.version_compare(1, 2, 4)
                or disk_src_protocol == 'gluster'):
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target, blockpull_options,
                                 **virsh_dargs)
        status = result.exit_status

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
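                # blockpull was started with "--bandwidth 1" and no --wait, so
                # the block job should still be running; creating a disk-only
                # snapshot now is expected to fail or succeed according to
                # snap_in_mirror_err.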
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name,
                                               snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s" % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
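                    # check_chain_xml is a helper assumed to be defined in the
                    # enclosing test module; it compares the <backingStore>
                    # chain in the disk XML against the given ordered list.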
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
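                    # After the pull, images between the active layer and the
                    # base are expected to be gone from the backing chain, so
                    # drop them from the expected chain before comparing.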
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If the base image is the top layer of the snapshot chain,
        # virsh blockpull should fail; return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
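Example #2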
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {
        'model': sec_model,
        'label': img_label,
        'relabel': sec_relabel
    }
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
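            # (the user/group override is presumably there so the qemu process
            # keeps CAP_SYS_RAWIO, which is verified against /proc/<pid>/status
            # after the VM starts)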
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt, but
                # the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # set host_sestatus as nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{
                "attrs": {
                    source_type: img_path
                },
                "seclabels": source_seclabel
            })
        else:
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {
                    source_type: img_path
                }})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # VM started successfully.
            # A VM with the seclabel set can access the image with the
            # given context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
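                # CAP_SYS_RAWIO is capability number 17, hence the
                # 1 << 17 == 0x20000 mask.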
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1,
                                                   0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # A VM with the seclabel set cannot access the image with the
            # given context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
Example #3
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """
    def make_disk_snapshot(postfix_n,
                           snapshot_take,
                           is_check_snapshot_tree=False,
                           is_create_image_file_in_vm=False):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix option
        :param snapshot_take: snapshots taken.
        :param is_create_image_file_in_vm: create image file in VM.
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make (snapshot_take - 1) external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix, if any, and append
                # ".<postfix_n><count>"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            if is_check_snapshot_tree:
                options = options.replace("--no-metadata", "")
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            if is_create_image_file_in_vm:
                create_file_cmd = "dd if=/dev/urandom of=/mnt/snapshot_%s.img bs=1M count=2" % count
                session.cmd_status_output(create_file_cmd)
                created_image_files_in_vm.append("snapshot_%s.img" % count)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

        def check_snapshot_tree():
            """
            Check whether the predefined snapshot names are equal to the
            snapshot names reported by virsh snapshot-list --tree
            """
            predefined_snapshot_name_list = []
            for count in range(1, snapshot_take):
                predefined_snapshot_name_list.append("%s_%s" %
                                                     (postfix_n, count))
            snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
            result_output = process.run(snapshot_list_cmd,
                                        ignore_status=True,
                                        shell=True).stdout_text
            virsh_snapshot_name_list = []
            for line in result_output.rsplit("\n"):
                strip_line = line.strip()
                if strip_line and "|" not in strip_line:
                    virsh_snapshot_name_list.append(strip_line)
            # Compare the two lists by order and value; all entries need to match.
            compare_list = [
                out_p for out_p, out_v in zip(predefined_snapshot_name_list,
                                              virsh_snapshot_name_list)
                if out_p not in out_v
            ]
            if compare_list:
                test.fail("snapshot tree not correctly returned.")

        # If is_check_snapshot_tree is True, check the snapshot tree output.
        if is_check_snapshot_tree:
            check_snapshot_tree()

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: first disk of first device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files(pre_set_root_dir=None):
        """
        Create backing chain files of relative path.
        :param pre_set_root_dir: preset root dir
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        if pre_set_root_dir is None:
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        if pre_set_root_dir:
            backing_file_dict["b"] = "%s" % first_disk_source
            backing_file_dict["c"] = "%s/b/b.img" % root_dir
            backing_file_dict["d"] = "%s/c/c.img" % root_dir
        disk_format = params.get("disk_format", "qcow2")
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = (
                "cd %s && qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s.img"
                % (backing_file_path, "qcow2", value, disk_format, key))
            ret = process.run(cmd, shell=True)
            disk_format = "qcow2"
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail(
                "The disk image path:%s doesn't include the origin image: %s" %
                (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    def create_reuse_external_snapshots(pre_set_root_dir=None):
        """
        Create reuse external snapshots
        :param pre_set_root_dir: preset root directory
        :return: absolute path of base file
        """
        if pre_set_root_dir is None:
            first_disk_source = get_first_disk_source()
            basename = os.path.basename(first_disk_source)
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        meta_options = " --reuse-external --disk-only --no-metadata"
        # Make three external snapshot overlay files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "b.img"
        backing_file_dict["c"] = "c.img"
        backing_file_dict["d"] = "d.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            external_snap_shot = "%s/%s" % (backing_file_path, value)
            snapshot_external_disks.append(external_snap_shot)
            options = "%s --diskspec %s,file=%s" % (meta_options, disk_target,
                                                    external_snap_shot)
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  options,
                                                  ignore_status=False,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result)
        logging.debug('reuse external snapshots:%s' % snapshot_external_disks)
        return root_dir

    def check_file_in_vm():
        """
        Check whether certain image files exist inside the VM.
        """
        for img_file in created_image_files_in_vm:
            status, output = session.cmd_status_output("ls -l /mnt/%s" %
                                                       img_file)
            logging.debug(output)
            if status:
                test.fail(
                    "blockcommit from top to base failed when ls image file in VM: %s"
                    % output)

    def do_blockcommit_pivot_repeatedly():
        """
        Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735
        """
        # Make an external snapshot, pivot, and delete the snapshot file repeatedly.
        tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
        block_target = 'vda'
        for count in range(0, 5):
            options = "%s " % tmp_snapshot_name
            options += "--disk-only --atomic"
            disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
            options += "  --diskspec  %s,snapshot=external,file=%s" % (
                block_target, disk_external)
            virsh.snapshot_create_as(vm_name,
                                     options,
                                     ignore_status=False,
                                     debug=True)
            virsh.blockcommit(vm_name,
                              block_target,
                              " --active --pivot ",
                              ignore_status=False,
                              debug=True)
            virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
            libvirt.delete_local_disk('file', disk_external)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    cmd_timeout = params.get("cmd_timeout", "1")
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}
    check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no")
    bandwidth = params.get("blockcommit_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot",
                                                  "no")
    restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit",
                                                   "no")
    check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm",
                                                 "no")
    pre_set_root_dir = None
    blk_source_folder = None
    convert_qcow2_image_to_raw = "yes" == params.get(
        "convert_qcow2_image_to_raw", "no")
    repeatedly_do_blockcommit_pivot = "yes" == params.get(
        "repeatedly_do_blockcommit_pivot", "no")
    from_top_without_active_option = "yes" == params.get(
        "from_top_without_active_option", "no")
    top_to_middle_keep_overlay = "yes" == params.get(
        "top_to_middle_keep_overlay", "no")
    block_disk_type_based_on_file_backing_file = "yes" == params.get(
        "block_disk_type_based_on_file_backing_file", "no")
    block_disk_type_based_on_gluster_backing_file = "yes" == params.get(
        "block_disk_type_based_on_gluster_backing_file", "no")

    # Check whether qemu-img needs the -U suboption, since the image locking
    # feature was added in qemu 2.10
    qemu_img_locking_feature_support = (
        libvirt_storage.check_qemu_image_lock_support())
    backing_file_relative_path = "yes" == params.get(
        "backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # This is brought by the new blockdev feature
    if (libvirt_version.version_compare(6, 0, 0)
            and params.get("transport", "") == "rdma"):
        test.cancel("If blockdev is enabled, the transport protocol 'rdma' is "
                    "not yet supported.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    cmd_session = None
    # Prepare an empty ceph config path so the cleanup code can tell whether a config file was created during the test
    ceph_cfg = ''
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'block' and reuse_external_snapshot:
            first_disk = vm.get_first_disk_devices()
            pre_set_root_dir = os.path.dirname(first_disk['source'])

        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")

                params.update({
                    "disk_source_name":
                    os.path.join(
                        pool_name,
                        'rbd_' + utils_misc.generate_random_string(4) + '.img')
                })
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(
                        mon_host,
                        *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common to clean image.')
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({
                    'disk_source_name': replace_disk_image,
                    'disk_type': 'file',
                    'disk_src_protocol': 'file'
                })
                vm.start()
            if convert_qcow2_image_to_raw:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                blk_source_image_after_converted = "%s/converted_%s" % (
                    blk_source_folder, blk_source_image)
                # Convert the image from qcow2 to raw
                convert_disk_cmd = ("qemu-img convert"
                                    " -O %s %s %s" %
                                    (disk_format, first_src_file,
                                     blk_source_image_after_converted))
                process.run(convert_disk_cmd, ignore_status=False, shell=True)
                params.update({
                    'disk_source_name': blk_source_image_after_converted,
                    'disk_type': 'file',
                    'disk_src_protocol': 'file'
                })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        if repeatedly_do_blockcommit_pivot:
            do_blockcommit_pivot_repeatedly()

        # Create block type disk on file backing file
        if block_disk_type_based_on_file_backing_file or block_disk_type_based_on_gluster_backing_file:
            if not vm.is_alive():
                vm.start()
            first_src_file = get_first_disk_source()
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
            iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True)
            block_type_backstore = iscsi_target
            if block_disk_type_based_on_file_backing_file:
                first_src_file = get_first_disk_source()
            if block_disk_type_based_on_gluster_backing_file:
                first_src_file = "gluster://%s/%s/gluster.qcow2" % (
                    params.get("gluster_server_ip"), params.get("vol_name"))
            backing_file_create_cmd = (
                "qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s" %
                ("qcow2", first_src_file, "qcow2", block_type_backstore))
            process.run(backing_file_create_cmd,
                        ignore_status=False,
                        shell=True)
            meta_options = " --reuse-external --disk-only --no-metadata"
            options = "%s --diskspec %s,file=%s" % (meta_options, 'vda',
                                                    block_type_backstore)
            virsh.snapshot_create_as(vm_name,
                                     options,
                                     ignore_status=False,
                                     debug=True)

        # The first disk is supposed to contain the OS.
        # We will perform the blockcommit operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []
        created_image_files_in_vm = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        if reuse_external_snapshot:
            make_relative_path_backing_files(pre_set_root_dir)
            blk_source_folder = create_reuse_external_snapshots(
                pre_set_root_dir)
        else:
            make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree,
                               check_image_file_in_vm)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            if libvirt_version.version_compare(6, 0, 0):
                bs_source = {'file': blk_source}
                bs_dict = {
                    "type": params.get("disk_type", "file"),
                    "format": {
                        'type': params.get("disk_format", "qcow2")
                    }
                }
                new_bs = disk_xml.new_backingstore(**bs_dict)
                new_bs["source"] = disk_xml.backingstore.new_source(
                    **bs_source)
                disk_xml.backingstore = new_bs
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # Check that the snapshot disk xml backingStore is as expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" % blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout %s" % cmd_timeout

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source
        if bandwidth:
            blockcommit_options += " --bandwidth %s" % bandwidth
        if bandwidth_byte:
            blockcommit_options += " --bytes"
        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            if reuse_external_snapshot:
                index = len(snapshot_external_disks) - 2
                top_image = snapshot_external_disks[index]
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if from_top_without_active_option:
            blockcommit_options = blockcommit_options.replace("--active", "")

        if top_to_middle_keep_overlay:
            blockcommit_options = blockcommit_options.replace("--active", "")
            blockcommit_options = blockcommit_options.replace("--pivot", "")
            blockcommit_options += " --keep-overlay"

        if restart_vm_before_commit:
            top = 2
            base = len(snapshot_external_disks)
            blockcommit_options = (
                "--top %s[%d] --base %s[%d] --verbose --wait --keep-relative" %
                (disk_target, top, disk_target, base))
            vm.destroy(gracefully=True)
            vm.start()

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # An inactive commit following an active commit will fail due to bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_xml = ''
                disk_xmls = vmxml.get_devices(device_type="disk")
                for disk in disk_xmls:
                    if disk.get('device_tag') == 'disk':
                        disk_xml = disk
                        break

                top_index = 1
                try:
                    top_index = disk_xml.backingstore.index
                except AttributeError:
                    pass
                else:
                    top_index = int(top_index)

                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            for count in range(1, block_commit_index):
                # Do block commit with --wait if top_inactive
                if top_inactive:
                    blockcommit_options = ("  --wait --verbose --top vda[%d] "
                                           "--base vda[%d] --keep-relative" %
                                           (top_index, top_index + 1))
                    if not libvirt_version.version_compare(6, 0, 0):
                        top_index = 1
                    else:
                        top_index += 1
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        if reuse_external_snapshot and not top_inactive:
            block_commit_index = len(snapshot_external_disks) - 1
            for index in range(block_commit_index):
                # Do block commit with --shallow --wait
                external_blockcommit_options = (
                    "  --shallow --wait --verbose --top %s " %
                    (snapshot_external_disks[index]))

                res = virsh.blockcommit(vm_name, blk_target,
                                        external_blockcommit_options,
                                        **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Do blockcommit with top active
            result = virsh.blockcommit(vm_name, blk_target,
                                       blockcommit_options, **virsh_dargs)
            # Check status_error
            libvirt.check_exit_status(result, status_error)
            return

        # Start one thread to check the bandwidth in output
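        # check_bandwidth_thread is assumed to be defined in the enclosing test
        # module and ThreadPool to come from multiprocessing.pool; the worker
        # polls the running block job to verify the bandwidth shown in its
        # output.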
        if bandwidth and bandwidth_byte:
            bandwidth += 'B'
            pool = ThreadPool(processes=1)
            pool.apply_async(
                check_bandwidth_thread,
                (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test))

        # Run test case
        # Active commit is not supported on rbd-based disks due to bug 1200726
        result = virsh.blockcommit(vm_name, blk_target, blockcommit_options,
                                   **virsh_dargs)
        # Check status_error
        libvirt.check_exit_status(result, status_error)

        # Skip checking the chain file as per the test case description
        if restart_vm_before_commit:
            return

        if check_image_file_in_vm:
            check_file_in_vm()

        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug(
                                    "after active block commit job "
                                    "ready for pivot, the target disk"
                                    " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name,
                                                        blk_target,
                                                        '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for the post-commit pivot to be synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s" %
                              disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path or reuse_external_snapshot:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            if blk_source_folder:
                process.run("cd %s && rm -rf b c d" % blk_source_folder,
                            shell=True)
        if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
        # Recover images' xattr if any are left
        dirty_images = get_images_with_xattr(vm)
        if dirty_images:
            clean_images_with_xattr(dirty_images)
            test.fail("VM's image(s) having xattr left")
Beispiel #4
0
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        iface.mac_address = mac
        logging.debug("Create new interface xml: %s", iface)
        return iface

    status_error = "yes" == params.get("status_error", "no")

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source", "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    username = params.get("username")
    password = params.get("password")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if attach_device:
                for i in range(int(iface_num)):
                    logging.info("Try to attach interface loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    iface_xml_obj = create_iface_xml(mac)
                    iface_xml_obj.xmltreefile.write()
                    ret = virsh.attach_device(vm_name,
                                              iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True)
                    if ret.exit_status:
                        if any([msg in ret.stderr for msg in err_msgs]):
                            logging.debug("No more pci slots, can't"
                                          " attach more devices")
                            break
                        elif (ret.stderr.count("doesn't support option %s" %
                                               attach_option)):
                            raise error.TestNAError(ret.stderr)
                        elif status_error:
                            continue
                        else:
                            logging.error("Command output %s" %
                                          ret.stdout.strip())
                            raise error.TestFail("Failed to attach-device")
                    elif stress_test:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name,
                                                  iface_xml_obj.xml,
                                                  flagstr=attach_option,
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        iface_list.append({
                            'mac': mac,
                            'iface_xml': iface_xml_obj
                        })
                # Need to sleep here for the attachment to take effect
                time.sleep(5)
            elif attach_iface:
                for i in range(int(iface_num)):
                    logging.info("Try to attach interface loop %s" % i)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name,
                                                 options,
                                                 ignore_status=True)
                    if ret.exit_status:
                        if any([msg in ret.stderr for msg in err_msgs]):
                            logging.debug("No more pci slots, can't"
                                          " attach more devices")
                            break
                        elif (ret.stderr.count("doesn't support option %s" %
                                               attach_option)):
                            raise error.TestNAError(ret.stderr)
                        elif status_error:
                            continue
                        else:
                            logging.error("Command output %s" %
                                          ret.stdout.strip())
                            raise error.TestFail("Failed to attach-interface")
                    elif stress_test:
                        # Detach the interface immediately for stress test
                        ret = virsh.detach_interface(vm_name,
                                                     "%s --mac %s" %
                                                     (iface_type, mac),
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        iface_list.append({'mac': mac})
                # Need to sleep here for the attachment to take effect
                time.sleep(5)

            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                vm.cleanup_serial_console()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # Check if interface was attached
            for iface in iface_list:
                if iface.has_key('mac'):
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(
                        vm_name, iface['mac'])
                    if not if_attr:
                        raise error.TestFail("Can't see interface "
                                             " in dumpxml")
                    if (if_attr['type'] != iface_type
                            or if_attr['source'] != iface_source['network']):
                        raise error.TestFail("Interface attribute doesn't"
                                             " match attachment opitons")
                    # Check interface on guest
                    if not utils_net.get_linux_ifname(session, iface['mac']):
                        raise error.TestFail("Can't see interface" " on guest")

            # Detach hot/cold-plugged interface at last
            if detach_device:
                for iface in iface_list:
                    if iface.has_key('iface_xml'):
                        ret = virsh.detach_device(vm_name,
                                                  iface['iface_xml'].xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        options = ("%s --mac %s" % (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name,
                                                     options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                for iface in iface_list:
                    if iface.has_key('mac'):
                        # Check interface in dumpxml output
                        if vm_xml.VMXML.get_iface_by_mac(
                                vm_name, iface['mac']):
                            raise error.TestFail("Interface still exist"
                                                 " after detachment")
            session.close()
        except virt_vm.VMStartError, e:
            logging.info(str(e))
            raise error.TestFail('VM Failed to start for some reason!')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
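A condensed sketch of the hot-plug path exercised above, using the same virttest helpers; the domain name is a placeholder and the guest is assumed to be running with the default network active.

from virttest import utils_net, virsh
from virttest.libvirt_xml import vm_xml

vm_name = "avocado-vt-vm1"        # placeholder domain name
mac = utils_net.generate_mac_address_simple()

# Hot-plug a virtio NIC on the default network, then verify it by MAC.
ret = virsh.attach_interface(vm_name,
                             "network default --model virtio --mac %s" % mac,
                             ignore_status=True, debug=True)
assert not ret.exit_status, ret.stderr
assert vm_xml.VMXML.get_iface_by_mac(vm_name, mac), "interface not in dumpxml"

# Unplug it again by MAC.
virsh.detach_interface(vm_name, "network --mac %s" % mac, ignore_status=False)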
Beispiel #5
0
def run(test, params, env):
    """
    Test autogenerated tap device name.

    1.Prepare test environment, restart libvirtd to flush the counter.
    2.start the domain.
    3.check the tap device name, then destroy the vm, restart libvirtd if
      needed, then start the vm again;
    4.hotplug an interface and check the tap device name;
    5.detach one interface, restart libvirtd if needed, then hotplug again to
      check the tap device name;
    6.hotplug interface of another type, check tap device name;
    7.recover the env.
    """
    def prepare_vmxml(vm, vm_name, direct=False):
        """
        Ensure there is only 1 requested interface in the vmxml

        :param vm: the test vm
        :param vm_name: the vm's name
        :param direct: True or False. If True, prepare the vm xml with a direct
                       type interface (the tap device will be named macvtap*);
                       if False, prepare the vm xml with a network type
                       interface connected to the default network (the tap
                       device will then be named vnet* automatically)
        :return: None
        """
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        iface_dict = prepare_iface_dict(direct)
        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)

    def prepare_iface_dict(direct=False):
        """
        Prepare the iface_dict for the function 'libvirt.modify_vm_iface()' to use

        :param direct: True or False
        :return: a dictionary
        """
        if direct:
            iface_name = utils_net.get_net_if(state="UP")[0]
            source = "{'dev': '%s', 'mode': 'bridge'}" % iface_name
            iface_dict = {"model": "virtio", 'type': 'direct', 'source': source,
                          'del_addr': 'yes', 'del_alias': 'yes',
                          'del_target': 'yes'}
        else:
            iface_dict = {"model": "virtio", 'type': 'network',
                          'source': "{'network': 'default'}",
                          'del_addr': 'yes', 'del_alias': 'yes',
                          'del_target': 'yes'}
        return iface_dict

    def check_target_name(index, direct=False):
        """
        Check the auto generated tap device name on the host

        :param index: the highest tap device index currently occupied;
                      -1 if no index is occupied yet
        :param direct: True or False.
                       True: the expected auto-generated tap device name
                             should be 'macvtap${index+1}'
                       False: the expected auto-generated tap device name
                              should be "vnet${index+1}"
        :return: None
        """
        cmd_output = process.run("ls /sys/class/net", shell=True,
                                 ignore_status=True).stdout_text
        logging.debug("Current network interfaces on the host includes: %s",
                      cmd_output)
        index_ = int(index)
        if direct:
            expected_name = 'macvtap' + str(index_ + 1)
        else:
            expected_name = "vnet" + str(index_ + 1)
        if expected_name not in cmd_output:
            test.fail("Can not get the expected tap: %s" % expected_name)
        else:
            logging.debug("Get the expected tap: %s" % expected_name)
        return

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    test_macvtap = "yes" == params.get("test_macvtap", "no")
    flush_with_occupation = "yes" == params.get("flush_with_occupation", "no")
    flush_after_detach = "yes" == params.get("flush_after_detach", "no")

    # If there is an existing vnet* or macvtap* even though the test vm is
    # destroyed, the env is not clean, so cancel the test
    cmd_output = process.run("ls /sys/class/net", shell=True,
                             ignore_status=True).stdout_text
    if ('vnet' in cmd_output and not test_macvtap) or \
            ('macvtap' in cmd_output and test_macvtap):
        test.cancel("The env is not clean, there is existing tap device!")
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        prepare_vmxml(vm, vm_name, test_macvtap)
        libvirt_network.ensure_default_network()
        libvirtd.restart()
        # If there is no vm running, restarting libvirtd will flush the
        # counter to the initial value: -1
        counter = -1
        vm.start()
        # check the auto generated tap device name after fresh initialized
        logging.debug("1. Check the tap device name after initialized:")
        check_target_name(counter, test_macvtap)
        # if the vm start successfully and tap device create successfully,
        # current counter should increase by 1
        counter = counter + 1
        # destroy and start the vm again
        vm.destroy()
        time.sleep(2)
        # Flush while the vm is down; if flushed, the counter is reset to -1
        if flush_with_occupation:
            libvirtd.restart()
            counter = -1
        time.sleep(2)
        vm.start()
        logging.debug("2. Check tap name after destroy and start vm again with "
                      "libvirtd restart: %s:", flush_with_occupation)
        check_target_name(counter, test_macvtap)
        # new tap created after vm start, counter increase by 1
        counter = counter + 1
        # add another interface with the same interface type
        if_dict = prepare_iface_dict(test_macvtap)
        mac_addr = utils_net.generate_mac_address_simple()
        if_dict['mac'] = mac_addr
        iface_add_xml = libvirt.modify_vm_iface(vm_name, 'get_xml', if_dict)
        virsh.attach_device(vm_name, iface_add_xml, debug=True, ignore_status=False)
        time.sleep(2)
        logging.debug("3. Check tap name after hotplug an interface:")
        check_target_name(counter, test_macvtap)
        # one interface with same iface type attached, counter increase by 1
        counter = counter + 1
        # Make sure the guest boots up. Otherwise detach_device won't complete
        vm.wait_for_serial_login(timeout=180).close()
        # detach the new attached interface
        virsh.detach_device(vm_name, iface_add_xml, wait_for_event=True,
                            debug=True, ignore_status=False)
        if flush_after_detach:
            libvirtd.restart()
            # the latest occupied name is recycled after restart
            # the counter is not initialized to -1 as the vm is running and
            # first tap name is occupied
            counter = counter - 1
        virsh.attach_device(vm_name, iface_add_xml, debug=True, ignore_status=False)
        logging.debug("4 Check tap name after detach and reattach with "
                      "flushed: %s:", flush_after_detach)
        check_target_name(counter, test_macvtap)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
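A small standalone helper, not part of the test above, that reads /sys/class/net the same way check_target_name() does and returns the tap indexes currently in use; it may help when reasoning about the counter described in the comments.

import os


def tap_indexes(prefix="vnet"):
    """Return the sorted numeric suffixes of existing tap devices."""
    idx = []
    for name in os.listdir("/sys/class/net"):
        suffix = name[len(prefix):]
        if name.startswith(prefix) and suffix.isdigit():
            idx.append(int(suffix))
    return sorted(idx)

# Right after a daemon restart the counter starts from -1, so the first
# domain started afterwards is expected to get vnet0, i.e. 0 in tap_indexes().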
Beispiel #6
0
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>
    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) unresettable : Try to reset all unresettable PCI devices.
    """
    def get_pci_info():
        """
        Get information for all PCI devices including:
        1) Whether the device has a `reset` file under its sysfs dir.
        2) Whether the device has a `driver` dir under its sysfs dir.

        :return: A dict using libvirt canonical nodedev name as keys
                 and dicts like {'reset': True, 'driver': True} as values
        """
        devices = {}
        pci_path = '/sys/bus/pci/devices'
        for device in os.listdir(pci_path):
            # Generate a virsh nodedev format device name
            dev_name = re.sub(r'\W', '_', 'pci_' + device)

            dev_path = os.path.join(pci_path, device)
            # Check whether device has `reset` file
            reset_path = os.path.join(dev_path, 'reset')
            has_reset = os.path.isfile(reset_path)

            # Check whether device has `driver` file
            driver_path = os.path.join(dev_path, 'driver')
            has_driver = os.path.isdir(driver_path)

            info = {'reset': has_reset, 'driver': has_driver}
            devices[dev_name] = info
        return devices

    def test_nodedev_reset(devices, expect_error):
        """
        Test nodedev-reset command on a list of devices

        :param devices: A list of node devices to be tested.
        :param expect_error: True if the command is expected to fail,
                             False if it is expected to succeed.
        """
        for device in devices:
            result = virsh.nodedev_reset(device)
            logging.debug(result)
            # Check whether exit code match expectation.
            libvirt.check_exit_status(result, expect_error)

    # Retrieve parameters
    expect_error = params.get('expect_error', 'no') == 'yes'
    device_option = params.get('device_option', 'valid')
    unspecified = 'REPLACE_WITH_TEST_DEVICE'

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        if not info['reset'] and not info['driver']:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Test the first resettable device found on the host.
            if resettable_nodes:
                test_nodedev_reset([resettable_nodes[0]], expect_error)
            else:
                test.cancel('No resettable PCI device found!')
        elif device_option == 'non-exist':
            specified_device = params.get('specified_device', unspecified)
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_error)
                else:
                    test.error('Specified device exists!')
            else:
                test.cancel('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_error)
            else:
                test.cancel('No non-PCI device found!')
        elif device_option == 'unresettable':
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_error)
            else:
                test.cancel('No unresettable device found!')
        else:
            test.error('Unrecognisable device option %s!' % device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        if not libvirtd.is_running():
            libvirtd.start()
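The sysfs-to-nodedev name mapping used by get_pci_info() above, shown on a single illustrative PCI address:

import re

# A sysfs entry such as 0000:00:1f.2 maps to the nodedev name pci_0000_00_1f_2
# because every non-word character is replaced with an underscore.
print(re.sub(r'\W', '_', 'pci_' + '0000:00:1f.2'))   # -> pci_0000_00_1f_2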
Beispiel #7
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    utils.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK))

    dump_path = os.path.join(test.tmpdir, "dump/")
    dump_file = ""
    if vm_action == "crash":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Set on_crash action
        vmxml.on_crash = vm_oncrash_action
        # Add <panic> device to domain
        panic_dev = Panic()
        panic_dev.addr_type = "isa"
        panic_dev.addr_iobase = "0x505"
        vmxml.add_device(panic_dev)
        vmxml.sync()
        # Config auto_dump_path in qemu.conf
        cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path, QEMU_CONF)
        utils.run(cmd)
        libvirtd_service.restart()
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            dump_file = dump_path + vm_name + "-*"
        # Start VM and check the panic device
        virsh.start(vm_name, ignore_status=False)
        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Skip this test if no panic device find
        if not vmxml_new.xmltreefile.find('devices').findall('panic'):
            raise error.TestNAError("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it")
    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
        elif vm_action == "kill":
            libvirtd_service.stop()
            kill_process_by_pattern(vm_name)
            libvirtd_service.restart()
        elif vm_action == "crash":
            session = vm.wait_for_login()
            # Stop kdump in the guest
            session.cmd("service kdump stop", ignore_all_errors=True)
            # Enable sysRq
            session.cmd("echo 1 > /proc/sys/kernel/sysrq")
            # Send key ALT-SysRq-c to crash VM, and command will not return
            # as vm crashed, so fail early for 'destroy' and 'preserve' action.
            # For 'restart', 'coredump-restart' and 'coredump-destroy' actions,
            # they all need more time to dump core file or restart OS, so using
            # the default session command timeout(60s)
            try:
                if vm_oncrash_action in ['destroy', 'preserve']:
                    timeout = 3
                else:
                    timeout = 60
                session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout)
            except ShellTimeoutError:
                pass
            session.close()
    except error.CmdError, e:
        raise error.TestError("Guest prepare action error: %s" % e)
Beispiel #8
0
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Perform test operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    if (vm_name != "lxc_test_vm1"):
        vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}
    hook_file = params.get("hook_file", "/etc/libvirt/hooks/qemu")
    hook_log = params.get("hook_log", "/tmp/qemu.log")
    machine_type = params.get("machine_type", "")

    def prepare_hook_file(hook_op):
        """
        Create hook file.
        """
        logging.info("hook script: %s", hook_op)
        hook_lines = hook_op.split(';')
        hook_dir = os.path.dirname(hook_file)
        logging.info("hook script: %s", hook_op)
        if not os.path.exists(hook_dir):
            os.mkdir(hook_dir)
        with open(hook_file, 'w') as hf:
            hf.write('\n'.join(hook_lines))
        os.chmod(hook_file, 0o755)

        # restart libvirtd
        libvirtd.restart()
        if utils_split_daemons.is_modular_daemon() and test_network:
            utils_libvirtd.Libvirtd("virtnetworkd").restart()

    def check_hooks(opt):
        """
        Check hook operations in log file
        """
        logging.debug("Trying to check the string '%s'" " in logfile", opt)
        if not os.path.exists(hook_log):
            logging.debug("Log file doesn't exist")
            return False

        logs = None
        with open(hook_log, 'r') as lf:
            logs = lf.read()
        if not logs:
            return False

        logging.debug("Read from hook log file: %s", logs)
        if opt in logs:
            return True
        else:
            return False

    def start_stop_hook():
        """
        Do start/stop operation and check the results.
        """
        logging.info("Try to test start/stop hooks...")
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script % (vm_name, hook_log))
        vm.start()
        vm.wait_for_login().close()
        try:
            hook_str = hook_para + " prepare begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " started begin -"
            assert check_hooks(hook_str)
            # stop the vm
            vm.destroy()
            hook_str = hook_para + " stopped end -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " release end -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check start/stop hooks.")

    def save_restore_hook():
        """
        Do save/restore operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script % (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script % (vm_name, hook_log))
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for save operation")
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for restore operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()
        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check restore hooks.")

    def managedsave_hook():
        """
        Do managedsave operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script % (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script % (vm_name, hook_log))
        ret = virsh.managedsave(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for" " managedsave operation")
        vm.start()
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for" " managedsave operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()

        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check managedsave hooks.")

    def libvirtd_hook():
        """
        Check the libvirtd hooks.
        """
        prepare_hook_file(hook_script % (vm_name, hook_log))
        hook_para = "%s %s" % (hook_file, vm_name)
        time.sleep(2)
        libvirtd.restart()
        try:
            hook_str = hook_para + " reconnect begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check libvirtd hooks")

    def lxc_hook():
        """
        Check the lxc hooks.
        """

        if platform.platform().count('el8'):
            test.cancel("lxc is not supported in rhel8")
        test_xml = vm_xml.VMXML("lxc")

        root_dir = data_dir.get_root_dir()
        lxc_xml_related_path_file = params.get("lxc_xml_file")
        lxc_xml_path_file = os.path.join(root_dir, lxc_xml_related_path_file)
        with open(lxc_xml_path_file, 'r') as fd:
            test_xml.xml = fd.read()

        uri = "lxc:///"
        vm_name = "lxc_test_vm1"
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script % hook_log)
        exit1 = params.get("exit1", "no")
        output = virsh.create(test_xml.xml, options="--console", uri=uri)

        if output.exit_status:
            logging.debug("output.stderr1: %s", output.stderr.lower())
            if (exit1 == "yes" and "hook script execution failed"
                    in output.stderr.lower()):
                return True
            else:
                test.fail("Create %s domain failed:%s" %
                          ("lxc", output.stderr))
        logging.info("Domain %s created, will check with console", vm_name)

        hook_str = hook_para + " prepare begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
        hook_str = hook_para + " start begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)

        virsh.destroy(vm_name, options="", uri=uri)

        hook_str = hook_para + " stopped end -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
        hook_str = hook_para + " release end -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)

    def daemon_hook():
        """
        Check the libvirtd hooks.
        """
        # stop daemon first
        libvirtd.stop()
        prepare_hook_file(hook_script % hook_log)
        try:
            libvirtd.start()
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)
            # Restart libvirtd and test again
            if os.path.exists(hook_log):
                os.remove(hook_log)
            libvirtd.restart()
            hook_str = hook_file + " - shutdown - shutdown"
            assert check_hooks(hook_str)
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)

            # kill the daemon with SIGHUP
            if os.path.exists(hook_log):
                os.remove(hook_log)

            daemon_process = utils_libvirtd.Libvirtd().service_name
            utils_misc.signal_program(daemon_process, 1, '/var/run')

            hook_str = hook_file + " - reload begin SIGHUP"
            assert check_hooks(hook_str)

        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check daemon hooks")

    def attach_hook():
        """
        Check attach hooks.
        """
        # Start a domain with qemu command.
        disk_src = vm.get_first_disk_devices()['source']
        vm_test = "foo"
        prepare_hook_file(hook_script % (vm_test, hook_log))
        qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
        if "ppc" in platform.machine():
            qemu_cmd = ("%s -machine pseries"
                        " -drive file=%s,if=none,bus=0,unit=1"
                        " -monitor unix:/tmp/demo,"
                        "server,nowait -name %s" %
                        (qemu_bin, disk_src, vm_test))
        else:
            qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                        " -monitor unix:/tmp/demo,"
                        "server,nowait -name %s" %
                        (qemu_bin, disk_src, vm_test))
        # Note: qemu-attach failed after the above command was changed
        os.system('%s &' % qemu_cmd)
        sta, pid = process.getstatusoutput("pgrep qemu-kvm")
        if not pid:
            test.fail("Cannot get pid of qemu command")
        try:
            ret = virsh.qemu_attach(pid, **virsh_dargs)
            if ret.exit_status:
                utils_misc.kill_process_tree(pid)
                test.fail("Cannot attach qemu process")
            else:
                virsh.destroy(vm_test)
        except Exception as detail:
            utils_misc.kill_process_tree(pid)
            test.fail("Failed to attach qemu process: %s" % str(detail))
        hook_str = hook_file + " " + vm_test + " attach begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check attach hooks")

    def edit_iface(net_name):
        """
        Edit interface options for vm.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        iface_xml = vmxml.get_devices(device_type="interface")[0]
        vmxml.del_device(iface_xml)
        iface_xml.type_name = "network"
        iface_xml.source = {"network": net_name}
        del iface_xml.address
        vmxml.add_device(iface_xml)
        vmxml.sync()

    def network_hook():
        """
        Check network hooks.
        """
        # Set interface to use default network
        net_name = params.get("net_name", "default")
        edit_iface(net_name)
        prepare_hook_file(hook_script % (net_name, hook_log))
        try:
            # destroy the network
            ret = virsh.net_destroy(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " stopped end -"
            assert check_hooks(hook_str)

            # start network
            ret = virsh.net_start(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_file + " " + net_name + " started begin -"
            assert check_hooks(hook_str)
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Remove all controllers, interfaces and addresses in vm dumpxml
            vm_inactive_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm_inactive_xml.remove_all_device_by_type('controller')
            type_dict = {'address': '/devices/*/address'}
            try:
                for elem in vm_inactive_xml.xmltreefile.findall(
                        type_dict['address']):
                    vm_inactive_xml.xmltreefile.remove(elem)
            except (AttributeError, TypeError) as details:
                test.fail("Fail to remove address.")
            vm_inactive_xml.xmltreefile.write()
            machine_list = vm_inactive_xml.os.machine.split("-")

            # Modify the machine type as required and add controllers to the
            # VM according to the machine type

            def generate_controller(controller_dict):
                controller_xml = Controller("controller")
                controller_xml.model = controller_dict['model']
                controller_xml.type = controller_dict['type']
                controller_xml.index = controller_dict['index']
                return controller_xml

            if machine_type == 'pc':
                vm_inactive_xml.set_os_attrs(
                    **{
                        "machine": machine_list[0] + "-i440fx-" +
                        machine_list[2]
                    })
                pc_Dict0 = {'model': 'pci-root', 'type': 'pci', 'index': 0}
                pc_Dict1 = {'model': 'pci-bridge', 'type': 'pci', 'index': 1}
                vm_inactive_xml.add_device(generate_controller(pc_Dict0))
                vm_inactive_xml.add_device(generate_controller(pc_Dict1))
            elif machine_type == 'q35':
                vm_inactive_xml.set_os_attrs(
                    **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
                q35_Dict0 = {'model': 'pcie-root', 'type': 'pci', 'index': 0}
                q35_Dict1 = {
                    'model': 'pcie-root-port',
                    'type': 'pci',
                    'index': 1
                }
                q35_Dict2 = {
                    'model': 'pcie-to-pci-bridge',
                    'type': 'pci',
                    'index': 2
                }
                vm_inactive_xml.add_device(generate_controller(q35_Dict0))
                vm_inactive_xml.add_device(generate_controller(q35_Dict1))
                vm_inactive_xml.add_device(generate_controller(q35_Dict2))
            vm_inactive_xml.sync()

            # Plug a interface and Unplug the interface
            vm.start()
            vm.wait_for_login().close()
            interface_num = len(
                vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                    "interface"))
            mac_addr = "52:54:00:9a:53:a9"
            logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))

            def is_attached_interface():
                return len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) == interface_num + 1

            ret = virsh.attach_interface(vm_name, ("network %s --mac %s" %
                                                   (net_name, mac_addr)))
            libvirt.check_exit_status(ret)
            if utils_misc.wait_for(is_attached_interface,
                                   timeout=20) is not True:
                test.fail("Attaching interface failed.")
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-created begin -"
            else:
                hook_str = hook_file + " " + net_name + " plugged begin -"
            assert check_hooks(hook_str)

            def is_detached_interface():
                return len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) == interface_num

            ret = virsh.detach_interface(vm_name,
                                         "network --mac %s" % mac_addr)
            libvirt.check_exit_status(ret)
            utils_misc.wait_for(is_detached_interface, timeout=50)
            # If the detach did not succeed within the timeout, detach again
            # (during testing, the first detach sometimes failed on q35 VMs)
            if len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) != interface_num:
                ret = virsh.detach_interface(vm_name,
                                             "network --mac %s" % mac_addr)
                libvirt.check_exit_status(ret)
            if utils_misc.wait_for(is_detached_interface,
                                   timeout=50) is not True:
                test.fail("Detaching interface failed.")
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-deleted begin -"
            else:
                hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
            # remove the log file
            if os.path.exists(hook_log):
                os.remove(hook_log)
            # destroy the domain
            vm.destroy()
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-deleted begin -"
            else:
                hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check network hooks")

    def run_scale_test():
        """
        Try to start and stop domain many times.
        """
        prepare_hook_file(hook_script)
        loop_num = int(params.get("loop_num", 30))
        loop_timeout = int(params.get("loop_timeout", 600))
        cmd1 = ("for i in {1..%s};do echo $i 'start guest -';"
                "virsh start %s;sleep 1;echo $i 'stop guest -';"
                "virsh destroy %s;sleep 1;done;" %
                (loop_num, vm_name, vm_name))
        cmd2 = ("for i in {1..%s};do virsh list;sleep 1;done;" % loop_num * 2)
        utils_misc.run_parallel([cmd1, cmd2], timeout=loop_timeout)

    start_error = "yes" == params.get("start_error", "no")
    test_start_stop = "yes" == params.get("test_start_stop", "no")
    test_lxc = "yes" == params.get("test_lxc", "no")
    test_attach = "yes" == params.get("test_attach", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_saverestore = "yes" == params.get("test_saverestore", "no")
    test_daemon = "yes" == params.get("test_daemon", "no")
    test_network = "yes" == params.get("test_network", "no")
    if not test_lxc:
        basic_test = "yes" == params.get("basic_test", "yes")
        scale_test = "yes" == params.get("scale_test", "yes")
    else:
        basic_test = "no" == params.get("basic_test", "yes")
        scale_test = "no" == params.get("scale_test", "yes")
    domainxml_test = "yes" == params.get("domainxml_test", "no")

    # The hook script is provided from config
    hook_script = params.get("hook_script")

    # Destroy VM first
    if vm_name != "lxc_test_vm1" and vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    if vm_name != "lxc_test_vm1":
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        try:
            if test_start_stop:
                start_stop_hook()
            elif test_attach:
                attach_hook()
            elif start_error:
                prepare_hook_file(hook_script % (vm_name, hook_log))
            elif test_daemon:
                daemon_hook()
            elif test_network:
                network_hook()
            elif scale_test:
                run_scale_test()
            # Start the domain
            if vm_name != "lxc_test_vm1" and vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if test_libvirtd:
                libvirtd_hook()
            elif test_saverestore:
                save_restore_hook()
            elif test_managedsave:
                managedsave_hook()
            if test_lxc:
                lxc_hook()

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if start_error:
                pass
            else:
                test.fail('VM Failed to start for some reason!')
        else:
            if start_error:
                test.fail('VM started unexpected')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if test_managedsave:
            virsh.managedsave_remove(vm_name)
        if vm_name != "lxc_test_vm1" and vm.is_alive():
            vm.destroy(gracefully=False)
        if os.path.exists(hook_file):
            os.remove(hook_file)
        if os.path.exists(hook_log):
            os.remove(hook_log)
        libvirtd.restart()
        if vm_name != "lxc_test_vm1":
            vmxml_backup.sync()
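The actual hook_script comes from the test configuration and is not shown here. A plausible minimal qemu hook that would satisfy check_hooks() simply appends its own path and arguments to the log file; the shell one-liner below is an assumption about that script, not the configured value.

import os

hook_file = "/etc/libvirt/hooks/qemu"
hook_log = "/tmp/qemu.log"

# libvirt invokes the qemu hook as: <hook_file> <vm_name> <operation> <sub-op> -
# so logging "$0 $@" reproduces the strings check_hooks() looks for, e.g.
# "/etc/libvirt/hooks/qemu <vm> prepare begin -".
with open(hook_file, 'w') as hf:
    hf.write('#!/bin/sh\necho "$0 $@" >> %s\n' % hook_log)
os.chmod(hook_file, 0o755)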
Beispiel #9
0
def run(test, params, env):
    """
    Test command: virsh net-edit <network>

    1) Define a temp virtual network
    2) Execute virsh net-edit to modify it
    3) Dump its xml then check it
    """
    def edit_net_xml():
        edit_cmd = r":%s /100.254/100.253"

        session = aexpect.ShellSession("sudo -s")
        try:
            logging.info("Execute virsh net-edit %s", net_name)
            session.sendline("virsh net-edit %s" % net_name)

            logging.info("Change the ip value of dhcp end")
            session.sendline(edit_cmd)
            session.send('\x1b')
            session.send('ZZ')
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
        except (aexpect.ShellError, aexpect.ExpectError) as details:
            log = session.get_output()
            session.close()
            test.fail("Failed to do net-edit: %s\n%s" % (details, log))

    # Gather test parameters
    net_name = params.get("net_edit_net_name", "editnet")
    test_create = "yes" == params.get("test_create", "no")

    virsh_dargs = {'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Get all network instance
    nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # First check if a bridge of this name already exists
    # Increment suffix integer from 1 then append it to net_name
    # till there is no name conflict.
    if net_name in nets:
        net_name_fmt = net_name + "%d"
        suffix_num = 1
        while ((net_name_fmt % suffix_num) in nets):
            suffix_num += 1

        net_name = net_name_fmt % suffix_num

    virtual_net = """
<network>
  <name>%s</name>
  <forward mode='nat'/>
  <bridge name='%s' stp='on' delay='0' />
  <mac address='52:54:00:03:78:6c'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254' />
    </dhcp>
  </ip>
</network>
""" % (net_name, net_name)

    try:
        test_xml = network_xml.NetworkXML(network_name=net_name)
        test_xml.xml = virtual_net
        if test_create:
            test_xml.create()
        else:
            test_xml.define()
    except xcepts.LibvirtXMLError as detail:
        test.cancel("Failed to define a test network.\n"
                    "Detail: %s." % detail)

    # Run test case
    try:
        libvirtd = utils_libvirtd.Libvirtd()
        if test_create:
            # Restart libvirtd and check state
            libvirtd.restart()
            net_state = virsh.net_state_dict()
            if (not net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or net_state[net_name]['persistent']):
                test.fail("Found wrong network states"
                          " after restarting libvirtd: %s" % net_state)
        else:
            libvirtd.restart()
            net_state = virsh.net_state_dict()
            if (net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                test.fail("Found wrong network states: %s" % net_state)
            virsh.net_start(net_name)
        edit_net_xml()
        if test_create:
            # Network becomes persistent after editing
            net_state = virsh.net_state_dict()
            if (not net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                test.fail("Found wrong network states"
                          " after editing: %s" % net_state)

        cmd_result = virsh.net_dumpxml(net_name, '--inactive', debug=True)
        if cmd_result.exit_status:
            test.fail("Failed to dump xml of virtual network %s" % net_name)

        # The xml should contain the match_string
        match_string = "100.253"
        xml = cmd_result.stdout.strip()
        if not re.search(match_string, xml):
            test.fail("The xml is not expected")
        # The active xml should not contain the match_string
        cmd_result = virsh.net_dumpxml(net_name, debug=True)
        if cmd_result.exit_status:
            test.fail("Failed to dump active xml of virtual network %s" %
                      net_name)
        # Check that the new value is not present in the active xml
        match_string = "100.253"
        xml = cmd_result.stdout.strip()
        if re.search(match_string, xml):
            test.fail("The active xml should not change")

    finally:
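        # avocado-vt helper that removes the test network completely (destroy + undefine)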
        test_xml.orbital_nuclear_strike()
Beispiel #10
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if len(node_list) < 2:
            test.cancel('Online NUMA nodes less than 2')
        node_a, node_b = min(node_list), max(node_list)
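        # Pin guest memory to the lowest- and highest-numbered online NUMA nodes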
        numa_memory.update({'nodeset': '%d,%d' % (node_a, node_b)})
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError as e:
            # Bug 1218149 closed as not a bug, workaround this as in bug
            # comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin to the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name,
                                0,
                                cpu,
                                debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
Beispiel #11
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug("The guest failed to start as expected,"
                                  "details see bug: bugzilla.redhat.com/show_bug.cgi"
                                  "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1, 0) and disk_type == "block":
                    output = to_text(process.system_output(
                        "nsenter -t %d -m -- ls -l %s" % (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
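            # Restore the original mode by clearing the group-write bit added earlier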
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
Beispiel #12
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity with the one got from running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())
        if hostcpu_num < 8:
            test.cancel("The host should have at least 8 CPUs for this test.")

        # online all host cpus
        for x in range(1, hostcpu_num):
            if cpuutil.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid a mismatch with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
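        # For the negative case the range's upper bound equals the host CPU
        # count, which is out of range; otherwise it is the highest valid index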
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("fail to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Beispiel #13
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel("Forbid using relative path or file name only is added since libvirt-3.0.0")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" % (target,
                                                                  vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to run blockcopy twice; the file created by the
        # first run can be reused by the second run when no dest_path is given.
        # This makes sure the image size equals the original disk size.
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
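    # Raise libvirtd log verbosity and send the log to a temp file so the
    # blockjob/state-lock checks below can scan it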
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here. Strip it as a suffix rather than with
        # str.strip(), which removes characters, not a trailing substring.
        path_noext = dest_path
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" % (dest_path,
                                                               expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This is a specific bug verify, so ignore status_error here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disk such as 'cdrom'
        disks = [disk for disk in disks if disk.device == 'disk']
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        # Default in case no branch below assigns a hosts list
        hosts = None
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or
              'name' in disk_xml.source.attrs or
              'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() + cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob(vm_name, target, "bandwidth",
                                                                                  bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using the --bytes option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() + cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As stdout may only
                # contain "Now in mirroring phase" and thus fail that check,
                # also look in the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to make
        # sure the following cleanup steps still run
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shut down a VM which has a
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
Beispiel #14
def run(test, params, env):
    """
    Test when the PCI configuration file is in read-only mode
    """
    def test_vf_hotplug():
        """
        Hot-plug VF to VM

        """
        logging.info("Preparing a running guest...")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=180)

        logging.info("Attaching VF to the guest...")
        mac_addr = utils_net.generate_mac_address_simple()
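        # Build the hostdev interface attributes from params, substituting the VF's PCI address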
        iface_dict = eval(
            params.get('iface_dict', '{"hostdev_addr": "%s"}') %
            utils_sriov.pci_to_addr(vf_pci))
        iface = interface.Interface("hostdev")
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        virsh.attach_device(vm_name,
                            iface.xml,
                            debug=True,
                            ignore_status=False)

        logging.info("Checking VF in the guest...")
        vm_iface_types = [
            iface.get_type_name() for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        if 'hostdev' not in vm_iface_types:
            test.fail('Unable to get hostdev interface!')
        if cmd_in_vm:
            if not utils_misc.wait_for(
                    lambda: not vm_session.cmd_status(cmd_in_vm), 30, 10):
                test.fail("Can not get the Virtual Function info on vm!")
        vm_session.close()

    libvirt_version.is_libvirt_feature_supported(params)

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    cmd_in_vm = params.get("cmd_in_vm")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    default_vf = sriov_base.setup_vf(pf_pci, params)
    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    dev_name = utils_sriov.get_device_name(vf_pci)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd('virtqemud')
    try:
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        logging.info("Re-mounting sysfs with ro mode...")
        utils_misc.mount('/sys', '', None, 'remount,ro')
        libvirtd.restart()
        run_test()
    finally:
        logging.info("Recover test enviroment.")
        utils_misc.mount('/sys', '', None, 'remount,rw')
        sriov_base.recover_vf(pf_pci, params, default_vf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        virsh.nodedev_reattach(dev_name, debug=True)
Beispiel #15
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent needs
    to be installed in the guest

    The command creates a snapshot (disk and RAM) from arguments covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    machine_type = params.get("machine_type", "")
    disk_device = params.get("disk_device", "")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_device == 'lun' and machine_type == 's390-ccw-virtio':
        params['disk_target_bus'] = 'scsi'
        logging.debug(
            "Setting target bus scsi because machine type has virtio 1.0."
            " See https://bugzilla.redhat.com/show_bug.cgi?id=1365823")

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the memspec option does not carry a "file=" path, we only need
        # to place the file under the test tmp dir.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
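    # Parse the '--opt value' pairs out of the option string into a dict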
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in " "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disk before create snapshot if not print xml and multi disks
        # specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
                adisk = list(
                    (opt_names["diskopts_%s" % str(i + 1)]).split(","))
                virsh.attach_disk(vm_name, disk_path, adisk[0], debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
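                # With --autodestroy the transient guest is destroyed when the virsh session closes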
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name,
                                                   options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name,
                                                      options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # For multiple snapshots without a specific snapshot name, the
                # snapshot name is a time string with one-second granularity.
                # Sleep a bit over a second here to avoid a name clash when
                # creating the next snapshot.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exist" %
                                      option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # For cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s" %
                                              (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # Remove attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
Beispiel #16
def run(test, params, env):
    """
    Test domiftune tuning

    1) Positive testing
       1.1) get the current domiftune parameters for a running guest
       1.2) set the current domiftune parameters for a running guest
    2) Negative testing
       2.1) get domiftune parameters
       2.2) set domiftune parameters
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    status_error = params.get("status_error", "no")
    start_vm = params.get("start_vm", "yes")
    change_parameters = params.get("change_parameters", "no")
    interface_ref = params.get("interface_ref", "name")
    netfloor = params.get("netfloor")
    pre_vmstate = params.get("pre_vmstate")
    interface = []
    change_net = False

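    # Note: the 'floor' value of domiftune is only honoured when the guest
    # interface is attached to a libvirt network that defines inbound QoS
    # (a <bandwidth> element), so depending on 'netfloor' the helper below
    # adds or removes that element on the source network first.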
    def set_netbw(netfloor, if_net, netxml):
        """
        Set network bandwidth as required.

        :param netfloor: action on network bandwidth, 'need' or 'delete'
        :param if_net: network name acquired from guest interface
        :param netxml: the network xml
        """
        if netfloor == 'delete':
            netxml.del_element('/bandwidth')
        elif netfloor == 'need':
            netxml.bandwidth_inbound = {'average': 100, 'peak': 200, 'burst': 128}
            netxml.bandwidth_outbound = {'average': 50, 'peak': 100, 'burst': 128}
        netxml.sync()
        virsh.net_dumpxml(if_net, debug=True)

    def get_iface(vm_name, if_mac):
        """
        Get the interface node matching the given mac from the guest xml

        :param vm_name: guest name
        :param if_mac: the interface mac address
        :return: the matching interface xml node
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if_node = vmxml.get_iface_all().get(if_mac)
        return if_node

    if pre_vmstate == "shutoff" and vm.is_alive():
        vm.destroy()

    if_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    if netfloor:
        if_node = get_iface(vm_name, if_mac)
        if_net = if_node.find('source').get('network')
        netxml = network_xml.NetworkXML.new_from_net_dumpxml(if_net)
        netxml_backup = netxml.copy()
        if (netfloor == 'delete' and 'bandwidth' in str(netxml)) or \
                (netfloor == 'need' and 'bandwidth' not in str(netxml)):
            set_netbw(netfloor, if_net, netxml)
            change_net = True

    if vm and not vm.is_alive():
        vm.start()

    if vm and vm.is_alive():
        if_node = get_iface(vm_name, if_mac)
        if_name = if_node.find('target').get('dev')

    if interface_ref == "name":
        interface = if_name

    if interface_ref == "mac":
        interface = if_mac

    logging.debug("the interface is %s", interface)

    test_dict = dict(params)
    test_dict['vm'] = vm
    if interface:
        test_dict['iface_dev'] = interface

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    # Positive and negative testing
    libvirtd = utils_libvirtd.Libvirtd()
    if change_parameters == "no":
        get_domiftune_parameter(test_dict, test, libvirtd)
    else:
        set_domiftune_parameter(test_dict, test, libvirtd)

    if change_parameters != "no":
        if change_net:
            netxml_backup.sync()
            if vm.is_alive():
                vm.destroy()
                vm.start()
        if not status_error and interface_ref == "mac":
            opt = 'config'
        else:
            opt = 'current'
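        # Setting both inbound and outbound averages to 0 is expected to
        # clear any bandwidth tuning the test left on the interface.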
        ret = virsh.domiftune(vm_name, if_mac, opt, '0', '0', debug=True)
        libvirt.check_exit_status(ret)
Beispiel #17
class EnvState(object):
    """
    Prepare environment state for test according to input parameters,
    and recover it after test.
    """
    sockets = []
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_config = utils_config.LibvirtQemuConfig()
    spice_x509_dir_real = ""
    spice_x509_dir_bak = ""
    vnc_x509_dir_real = ""
    vnc_x509_dir_bak = ""

    def _backup_dir(self, path):
        """
        Backup the original libvirt spice or vnc x509 certificate directory.
        """
        if os.path.isdir(path):
            backup_path = path + '.bak'
            if os.path.isdir(backup_path):
                shutil.rmtree(backup_path)
            shutil.move(path, backup_path)
            return backup_path
        else:
            return ""

    def _restore_dir(self, path, backup_path):
        """
        Restore the original libvirt spice or vnc x509 certificate directory
        from backup_path.
        """
        if os.path.isdir(path):
            shutil.rmtree(path)
        if backup_path:
            shutil.move(backup_path, path)

    def __init__(self, params, expected_result):
        spice_tls = params.get("spice_tls", "not_set")
        spice_listen = params.get("spice_listen", "not_set")
        vnc_tls = params.get("vnc_tls", "not_set")
        vnc_listen = params.get("vnc_listen", "not_set")
        spice_x509_dir = params.get("spice_x509_dir", "not_set")
        vnc_x509_dir = params.get("vnc_x509_dir", "not_set")
        spice_prepare_cert = params.get("spice_prepare_cert", "yes")
        vnc_prepare_cert = params.get("vnc_prepare_cert", "yes")
        port_min = params.get("remote_display_port_min", 'not_set')
        port_max = params.get("remote_display_port_max", 'not_set')
        auto_unix_socket = params.get("vnc_auto_unix_socket", 'not_set')
        tls_x509_verify = params.get("vnc_tls_x509_verify", 'not_set')

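        # Convention used below: a value of 'not_set' deletes the
        # corresponding qemu.conf key so libvirt falls back to its built-in
        # default; any other value is written into qemu.conf as-is.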
        if spice_x509_dir == 'not_set':
            self.spice_x509_dir_real = '/etc/pki/libvirt-spice'
        else:
            self.spice_x509_dir_real = spice_x509_dir

        self.spice_x509_dir_bak = self._backup_dir(self.spice_x509_dir_real)
        if spice_prepare_cert == 'yes':
            utils_misc.create_x509_dir(
                self.spice_x509_dir_real,
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test',
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test', 'none', True)

        if vnc_x509_dir == 'not_set':
            self.vnc_x509_dir_real = '/etc/pki/libvirt-vnc'
        else:
            self.vnc_x509_dir_real = vnc_x509_dir

        self.vnc_x509_dir_bak = self._backup_dir(self.vnc_x509_dir_real)
        if vnc_prepare_cert == 'yes':
            utils_misc.create_x509_dir(
                self.vnc_x509_dir_real,
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test',
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test', 'none', True)

        if spice_x509_dir == 'not_set':
            del self.qemu_config.spice_tls_x509_cert_dir
        else:
            self.qemu_config.spice_tls_x509_cert_dir = spice_x509_dir

        if vnc_x509_dir == 'not_set':
            del self.qemu_config.vnc_tls_x509_cert_dir
        else:
            self.qemu_config.vnc_tls_x509_cert_dir = vnc_x509_dir

        if spice_tls == 'not_set':
            del self.qemu_config.spice_tls
        else:
            self.qemu_config.spice_tls = spice_tls

        if vnc_tls == 'not_set':
            del self.qemu_config.vnc_tls
        else:
            self.qemu_config.vnc_tls = vnc_tls

        if port_min == 'not_set':
            del self.qemu_config.remote_display_port_min
        else:
            self.qemu_config.remote_display_port_min = port_min

        if port_max == 'not_set':
            del self.qemu_config.remote_display_port_max
        else:
            self.qemu_config.remote_display_port_max = port_max

        if spice_listen == 'not_set':
            del self.qemu_config.spice_listen
        elif spice_listen in ['valid_ipv4', 'valid_ipv6']:
            expected_ip = str(expected_result['spice_ips'][0])
            self.qemu_config.spice_listen = expected_ip
        else:
            self.qemu_config.spice_listen = spice_listen

        if auto_unix_socket == 'not_set':
            del self.qemu_config.vnc_auto_unix_socket
        else:
            self.qemu_config.vnc_auto_unix_socket = auto_unix_socket

        if tls_x509_verify == 'not_set':
            del self.qemu_config.vnc_tls_x509_verify
        else:
            self.qemu_config.vnc_tls_x509_verify = tls_x509_verify

        if vnc_listen == 'not_set':
            del self.qemu_config.vnc_listen
        elif vnc_listen in ['valid_ipv4', 'valid_ipv6']:
            expected_ip = str(expected_result['vnc_ips'][0])
            self.qemu_config.vnc_listen = expected_ip
        else:
            self.qemu_config.vnc_listen = vnc_listen

        self.libvirtd.restart()

    def restore(self):
        """
        Recover environment state after test.
        """
        self._restore_dir(self.spice_x509_dir_real, self.spice_x509_dir_bak)
        self._restore_dir(self.vnc_x509_dir_real, self.vnc_x509_dir_bak)
        self.qemu_config.restore()
        self.libvirtd.restart()
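# A minimal usage sketch for EnvState (hypothetical; the run() function that
# drives it is not part of this snippet):
#
#     state = EnvState(params, expected_result)
#     try:
#         pass  # start the guest and check graphics listen addresses/ports
#     finally:
#         state.restore()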
Beispiel #18
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For constructing the rhv-upload options in the v2v command
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    virsh_instance = None
    # Initialize so the finally block can reference it even when an early
    # failure prevents SASL setup
    v2v_sasl = None

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Cannot find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio driver errors do not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # See man virt-v2v-input-xen(1)
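        # (Older xen hosts typically offer only legacy ssh key-exchange and
        # cipher algorithms, so the LEGACY crypto policy is assumed to be
        # required for virt-v2v's ssh transport; it is reset to DEFAULT in
        # the finally block.)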
        process.run('update-crypto-policies --set LEGACY',
                    verbose=True,
                    ignore_status=True,
                    shell=True)

        # Setup ssh-agent access to xen hypervisor
        logging.info('Set up ssh-agent access to the xen host')
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(xen_host,
                                                              xen_host_user,
                                                              xen_host_passwd,
                                                              auto_close=False)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)
        virsh_dargs = {
            'uri': uri,
            'remote_ip': xen_host,
            'remote_user': '******',
            'remote_pwd': xen_host_passwd,
            'auto_close': True,
            'debug': True
        }
        virsh_instance = utils_v2v.wait_for(virsh.VirshPersistent,
                                            **virsh_dargs)
        logging.debug('A new virsh session %s was created', virsh_instance)
        if not utils_v2v.wait_for(virsh_instance.domain_exists, name=vm_name):
            test.error('VM %s does not exist' % vm_name)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('Package virtio-win was not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()
            logging.debug('A SASL session %s was created', v2v_sasl)

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        logging.info("Cleaning Environment")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        # Restore crypto-policies to DEFAULT; the setting is not expected to
        # be anything other than DEFAULT in the testing environment.
        process.run('update-crypto-policies --set DEFAULT',
                    verbose=True,
                    ignore_status=True,
                    shell=True)
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
        if virsh_instance:
            logging.debug('virsh session %s is closing', virsh_instance)
            virsh_instance.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
        utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
        process.run('ssh-agent -k')
Beispiel #19
def run(test, params, env):
    """
    Test guest numa setting
    """
    def replace_qemu_cmdline(cmdline_list):
        """
        Replace the expected qemu command line for new machine type

        :param cmdline_list: The list for expected qemu command lines
        :return: The list contains the updated qemu command lines if any
        """
        os_xml = getattr(vmxml, "os")
        machine_ver = getattr(os_xml, 'machine')
        if (machine_ver.startswith("pc-q35-rhel")
                and machine_ver > 'pc-q35-rhel8.2.0'
                and libvirt_version.version_compare(6, 4, 0)):
            # Replace 'node,nodeid=0,cpus=0-1,mem=512' with
            # 'node,nodeid=0,cpus=0-1,memdev=ram-node0'
            # Replace 'node,nodeid=1,cpus=2-3,mem=512' with
            # 'node,nodeid=1,cpus=2-3,memdev=ram-node1'
            for cmd in cmdline_list:
                line = cmd['cmdline']
                node = line.split(',')[1][-1]
                cmd['cmdline'] = line.replace('mem=512',
                                              'memdev=ram-node{}'.format(node))

        return cmdline_list

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
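    # On ppc64 hosts the online NUMA node IDs are often sparse, so the numeric
    # node indices from the test configuration are remapped below onto the
    # node IDs actually reported as online by the host.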
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages; runtime updates of
            # the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # Set hugepages per node if the current value is not enough;
                # runtime updates of per-node 1G hugepage counts are supported
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = vmcpuxml.dicts_to_cells(numa_cell)
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        cmdline_list = replace_qemu_cmdline(cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Beispiel #20
def run(test, params, env):
    """
    Test the command virsh hostname

    (1) Call virsh hostname
    (2) Call virsh hostname with an unexpected option
    (3) Call virsh hostname with the libvirtd service stopped
    """
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri", None)

    if remote_uri and remote_ip.count("EXAMPLE"):
        test.cancel("Pls configure rempte_ip first")

    session = None
    if remote_uri:
        session = remote.wait_for_login('ssh', remote_ip, '22', remote_user,
                                        remote_pwd, r"[\#\$]\s*$")
        hostname = session.cmd_output("hostname -f").strip()
    else:
        hostname_result = process.run("hostname -f",
                                      shell=True,
                                      ignore_status=True)
        hostname = hostname_result.stdout_text.strip()

    # Prepare the libvirtd service on the local host. Default to "on" so the
    # later checks do not reference an undefined name when the param is absent.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Start libvirtd on remote server
    if remote_uri:
        if not utils_package.package_install("libvirt", session):
            test.cancel("Failed to install libvirt on remote server")
        libvirtd = utils_libvirtd.Libvirtd(session=session)
        libvirtd.restart()

    # Run test case
    if remote_uri:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    option = params.get("virsh_hostname_options")
    hostname_test = virsh.hostname(option,
                                   uri=remote_uri,
                                   ignore_status=True,
                                   debug=True)
    status = 0
    if hostname_test == '':
        status = 1
        hostname_test = None

    # Restart the libvirtd service if it was stopped
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Close session
    if session:
        session.close()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed.")
            else:
                test.fail("Command 'virsh hostname %s' succeeded "
                          "(incorrect command)" % option)
    elif status_error == "no":
        if hostname != hostname_test:
            test.fail("Virsh cmd gives hostname %s != %s." %
                      (hostname_test, hostname))
        if status != 0:
            test.fail("Command 'virsh hostname %s' failed "
                      "(correct command)" % option)
Beispiel #21
def run(test, params, env):
    """
    Test command: virsh setmem.

    1) Prepare vm environment.
    2) Handle params.
    3) Prepare libvirtd status.
    4) Run test command and wait for the current memory to become stable.
    5) Recover environment.
    6) Check result.
    """
    def get_vm_usable_mem(session):
        """
        Get total usable RAM from /proc/meminfo
        """
        cmd = "cat /proc/meminfo"
        proc_mem = session.cmd_output(cmd)
        total_usable_mem = re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                                     proc_mem).group(1)
        return int(total_usable_mem)

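    # "Unusable" memory is the gap between the physical RAM reported by
    # dmidecode and MemTotal from /proc/meminfo (firmware/kernel reserved
    # memory); the rest of the test presumably uses it to translate between
    # the size set from the host and the total visible inside the guest.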
    def vm_unusable_mem(session):
        """
        Get the unusable RAM of the VM.
        """
        # Get total physical memory from dmidecode
        cmd = "dmidecode -t 17"
        dmi_mem = session.cmd_output(cmd)
        dmi_mem_size = re.findall(r'Size:\s(\d+\s+[K|M|G]B)', dmi_mem)
        if not dmi_mem_size:
            test.fail("Cannot get memory size info inside VM.")
        total_physical_mem = 0
        for size_info in dmi_mem_size:
            mem_size = int(size_info.split()[0].strip())
            mem_unit = size_info.split()[1].strip()
            if mem_unit.lower() == 'kb':
                total_physical_mem += mem_size
            elif mem_unit.lower() == 'mb':
                total_physical_mem += mem_size * 1024
            elif mem_unit.lower() == 'gb':
                total_physical_mem += mem_size * 1048576
        return total_physical_mem - get_vm_usable_mem(session)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        """
        Create domain options of command
        """
        # Specify domain as argument or parameter
        if domarg == "yes":
            dom_darg_key = "domainarg"
        else:
            dom_darg_key = "domain"

        # How to reference domain
        if vm_ref == "domid":
            dom_darg_value = domid
        elif vm_ref == "domname":
            dom_darg_value = vm_name
        elif vm_ref == "domuuid":
            dom_darg_value = domuuid
        elif vm_ref == "none":
            dom_darg_value = None
        elif vm_ref == "emptystring":
            dom_darg_value = '""'
        else:  # stick in value directly
            dom_darg_value = vm_ref

        return {dom_darg_key: dom_darg_value}

    def make_sizeref(sizearg, mem_ref, original_mem):
        """
        Create size options of command
        """
        if sizearg == "yes":
            size_darg_key = "sizearg"
        else:
            size_darg_key = "size"

        if mem_ref == "halfless":
            size_darg_value = "%d" % (original_mem // 2)
        elif mem_ref == "halfmore":
            size_darg_value = "%d" % int(original_mem * 1.5)  # no fraction
        elif mem_ref == "same":
            size_darg_value = "%d" % original_mem
        elif mem_ref == "emptystring":
            size_darg_value = '""'
        elif mem_ref == "zero":
            size_darg_value = "0"
        elif mem_ref == "toosmall":
            size_darg_value = "1024"
        elif mem_ref == "toobig":
            size_darg_value = "1099511627776"  # (KiB) One Petabyte
        elif mem_ref == "none":
            size_darg_value = None
        else:  # stick in value directly
            size_darg_value = mem_ref

        return {size_darg_key: size_darg_value}

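    # Illustrative example (not part of the test flow): cal_deviation(950, 1000)
    # returns 5.0, i.e. the actual value deviates by 5% from the expected one.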
    def cal_deviation(actual, expected):
        """
        Calculate deviation of actual result and expected result
        """
        numerator = float(actual)
        denominator = float(expected)
        if numerator > denominator:
            numerator = denominator
            denominator = float(actual)
        return 100 - (100 * (numerator / denominator))

    def is_old_libvirt():
        """
        Check if libvirt is old version
        """
        regex = r'\s+\[--size\]\s+'
        return bool(not virsh.has_command_help_match('setmem', regex))

    def print_debug_stats(original_inside_mem, original_outside_mem,
                          test_inside_mem, test_outside_mem,
                          expected_outside_mem, expected_inside_mem,
                          delta_percentage, unusable_mem):
        """
        Print debug message for test
        """
        # Calculate deviation
        inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
        outside_deviation = cal_deviation(test_outside_mem,
                                          expected_outside_mem)
        dbgmsg = ("Unusable memory of VM   : %d KiB\n"
                  "Original inside memory  : %d KiB\n"
                  "Expected inside memory  : %d KiB\n"
                  "Actual inside memory    : %d KiB\n"
                  "Inside memory deviation : %0.2f%%\n"
                  "Original outside memory : %d KiB\n"
                  "Expected outside memory : %d KiB\n"
                  "Actual outside memory   : %d KiB\n"
                  "Outside memory deviation: %0.2f%%\n"
                  "Acceptable deviation    : %0.2f%%" %
                  (unusable_mem, original_inside_mem, expected_inside_mem,
                   test_inside_mem, inside_deviation, original_outside_mem,
                   expected_outside_mem, test_outside_mem, outside_deviation,
                   delta_percentage))
        for dbgline in dbgmsg.splitlines():
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmem_vm_ref", "")
    mem_ref = params.get("setmem_mem_ref", "")
    flags = params.get("setmem_flags", "")
    status_error = "yes" == params.get("status_error", "no")
    old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
    quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
    domarg = params.get("setmem_domarg", "no")
    sizearg = params.get("setmem_sizearg", "no")
    libvirt_status = params.get("libvirt", "on")
    delta_percentage = float(params.get("setmem_delta_per", "10"))
    start_vm = "yes" == params.get("start_vm", "yes")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    manipulate_dom_before_setmem = "yes" == params.get(
        "manipulate_dom_before_setmem", "no")
    manipulate_dom_after_setmem = "yes" == params.get(
        "manipulate_dom_after_setmem", "no")
    manipulate_action = params.get("manipulate_action", "")
    readonly = "yes" == params.get("setmem_readonly", "no")
    expect_msg = params.get("setmem_err_msg")

    vm = env.get_vm(vm_name)
    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    vmosxml = vmxml.os
    need_mkswap = False
    if manipulate_action in ['s3', 's4']:
        vm.destroy()
        BIOS_BIN = "/usr/share/seabios/bios.bin"
        if os.path.isfile(BIOS_BIN):
            vmosxml.loader = BIOS_BIN
            vmxml.os = vmosxml
            vmxml.sync()
        else:
            logging.error("Not find %s on host", BIOS_BIN)
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.prepare_guest_agent()
        if manipulate_action == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

    memballoon_model = params.get("memballoon_model", "")
    if memballoon_model:
        vm.destroy()
        vmxml.del_device('memballoon', by_tag=True)
        memballoon_xml = vmxml.get_device_class('memballoon')()
        memballoon_xml.model = memballoon_model
        vmxml.add_device(memballoon_xml)
        logging.info(memballoon_xml)
        vmxml.sync()
        vm.start()

    remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
    if remove_balloon_driver:
        if not vm.is_alive():
            logging.error("Can't remove module as guest not running")
        else:
            session = vm.wait_for_login()
            cmd = "rmmod virtio_balloon"
            s_rmmod, o_rmmod = session.cmd_status_output(cmd)
            if s_rmmod != 0:
                logging.error(
                    "Fail to remove module virtio_balloon in guest:\n%s",
                    o_rmmod)
            session.close()
    # Get original data
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
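    # "Unusable" memory is the difference between the memory presented to the
    # VM (dmidecode inside the guest, or <memory> from the XML when dmidecode
    # is unavailable) and what the guest reports as usable; the checks below
    # assume this delta stays roughly constant when the memory target changes.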
    if session.cmd_status('dmidecode'):
        # The physical memory size is in the vm xml; use it when dmidecode is
        # not supported
        unusable_mem = int(vmxml.max_mem) - get_vm_usable_mem(session)
    else:
        unusable_mem = vm_unusable_mem(session)
    original_outside_mem = vm.get_used_mem()
    original_inside_mem = get_vm_usable_mem(session)
    session.close()
    # Prepare VM state
    if not start_vm:
        vm.destroy()
    else:
        if paused_after_start_vm:
            vm.pause()
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    # Argument pattern is complex, build with dargs
    dargs = {
        'flagstr': flags,
        'use_kilobytes': use_kilobytes,
        'uri': uri,
        'ignore_status': True,
        "debug": True,
        'readonly': readonly
    }
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))
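    # The assembled dargs now looks roughly like (sketch):
    #   {'flagstr': flags, 'use_kilobytes': ..., 'uri': uri,
    #    'ignore_status': True, 'debug': True, 'readonly': readonly,
    #    'domainarg'/'domain': <domain ref>, 'sizearg'/'size': <size in KiB>}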

    # Prepare libvirtd status
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt_status == "off":
        libvirtd.stop()
    else:
        if not libvirtd.is_running():
            libvirtd.start()

    if status_error or (old_libvirt_fail and old_libvirt):
        logging.info("Error Test: Expecting an error to occur!")

    try:
        memory_change = True
        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            if manipulate_action in ['save', 'managedsave', 's4']:
                memory_change = False

        result = virsh.setmem(**dargs)
        status = result.exit_status

        if status == 0:
            logging.info("Waiting %d seconds for VM memory to settle",
                         quiesce_delay)
            # It takes time for the kernel to settle on the new memory
            # and the number of clean pages is not predictable. Therefore,
            # it is extremely difficult to determine quiescence, so sleeping
            # one second per error percent is a reasonable option.
            time.sleep(quiesce_delay)

        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action, True)
        if manipulate_dom_after_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            manipulate_domain(test, vm_name, manipulate_action, True)

        # Recover libvirtd status
        if libvirt_status == "off":
            libvirtd.start()

        # Gather stats if not running error test
        if not status_error and not old_libvirt_fail:
            # Expected results for both inside and outside
            if remove_balloon_driver:
                expected_mem = original_outside_mem
            else:
                if not memory_change:
                    expected_mem = original_inside_mem
                elif sizearg == "yes":
                    expected_mem = int(dargs["sizearg"])
                else:
                    expected_mem = int(dargs["size"])
            if memory_change:
                # Subtract unusable memory for the inside memory check
                expected_inside_mem = expected_mem - unusable_mem
                expected_outside_mem = expected_mem
            else:
                expected_inside_mem = expected_mem
                expected_outside_mem = original_outside_mem

            def get_vm_mem():
                """
                Get actual memory results for both inside and outside
                :return: (inside memory, outside memory) tuple
                """
                if not memory_change:
                    test_inside_mem = original_inside_mem
                    test_outside_mem = original_outside_mem
                else:
                    if vm.state() == "shut off":
                        vm.start()
                    elif vm.state() == "paused":
                        # Make sure it's never paused
                        vm.resume()
                    session = vm.wait_for_login()

                    # Actual results
                    test_inside_mem = get_vm_usable_mem(session)
                    session.close()
                    test_outside_mem = vm.get_used_mem()
                return (test_inside_mem, test_outside_mem)

            # Don't care about memory comparison on error test
            def verify_outside_result():
                _, test_outside_mem = get_vm_mem()
                return (cal_deviation(test_outside_mem, expected_outside_mem)
                        <= delta_percentage)

            def verify_inside_result():
                test_inside_mem, _ = get_vm_mem()
                return (cal_deviation(test_inside_mem, expected_inside_mem) <=
                        delta_percentage)

            msg = "test conditions not met: "
            error_flag = 0
            if status != 0:
                error_flag = 1
                msg += "Non-zero virsh setmem exit code. "
            if not utils_misc.wait_for(verify_outside_result, timeout=240):
                error_flag = 1
                msg += "Outside memory deviated. "
            if not utils_misc.wait_for(verify_inside_result, timeout=240):
                error_flag = 1
                msg += "Inside memory deviated. "

            test_inside_mem, test_outside_mem = get_vm_mem()
            print_debug_stats(original_inside_mem, original_outside_mem,
                              test_inside_mem, test_outside_mem,
                              expected_outside_mem, expected_inside_mem,
                              delta_percentage, unusable_mem)
            if error_flag:
                test.fail(msg)
        elif not status_error and old_libvirt_fail:
            if status == 0:
                if old_libvirt:
                    test.fail("Error test did not result in an error")
            else:
                if not old_libvirt:
                    test.fail("Newer libvirt failed when it should not")
        else:  # Verify an error test resulted in error
            if status == 0:
                test.fail("Error test did not result in an error")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        vm.destroy()
        backup_xml.sync()


def run(test, params, env):
    """
    Test command: virsh qemu-agent-command.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("agent_cmd", "")
    options = params.get("options", "")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    status_error = "yes" == params.get("status_error", "no")
    if not status_error and options:
        option = options.split()[0]
        test_cmd = "qemu-agent-command"
        if virsh.has_command_help_match(test_cmd, option) is None:
            test.cancel("The current libvirt doesn't support"
                        " %s option for %s" % (option, test_cmd))
    guest_cpu_busy = "yes" == params.get("guest_cpu_busy", "no")
    password = params.get("password", None)
    domuuid = vm.get_uuid()
    domid = ""
    xml_file = os.path.join(data_dir.get_tmp_dir(), "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    libvirtd_inst = utils_libvirtd.Libvirtd()

    # Prepare domain
    try:
        reset_domain(test, vm, vm_state, needs_agent, guest_cpu_busy, password)
    except exceptions.TestCancel as details:
        reset_env(vm_name, xml_file)
        test.cancel(details)
    except Exception as details:
        reset_env(vm_name, xml_file)
        test.fail(details)

    if vm_state != "shut off":
        domid = vm.get_id()

    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif domid and vm_ref == "hex_id":
        vm_ref = hex(int(domid))

    try:
        if vm_state == "running" and needs_agent:
            # Check whether qemu-guest-agent is active in guest
            session = vm.wait_for_login()

            def verify_alive():
                return utils_misc.get_guest_service_status(
                    session, 'qemu-guest-agent') == 'active'

            if not utils_misc.wait_for(verify_alive, 30):
                test.error('Service "qemu-guest-agent" is not active')

        # Run virsh command
        cmd_result = virsh.qemu_agent_command(vm_ref,
                                              cmd,
                                              options,
                                              ignore_status=True,
                                              debug=True)
        status = cmd_result.exit_status

        # Check result
        if not libvirtd_inst.is_running():
            test.fail("Libvirtd is not running after run command.")
        if status_error:
            if not status:
                # Bug 853673
                err_msg = "Expect fail but run successfully, please check Bug: "
                err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id=853673"
                err_msg += " for more info"
                test.fail(err_msg)
            else:
                logging.debug("Command failed as expected.")
        else:
            if status:
                if cmd_result.stderr.count("not responding"):
                    test.cancel(cmd_result.stderr.strip())
                if cmd.count("guest-shutdown") and\
                   cmd_result.stderr.count("Missing monitor reply object"):
                    err_msg = "Please check bug: "
                    err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id="
                    err_msg += "1050843 for more info"
                    logging.error(err_msg)
                if "--async" in options:
                    err_msg = "Please check bug: "
                    err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id="
                    err_msg += "1099060 for more info"
                    logging.error(err_msg)
                test.fail("Expect succeed, but run fail.")
    finally:
        # Cleanup
        reset_env(vm_name, xml_file)
        if not libvirtd_inst.is_running():
            libvirtd_inst.restart()
Example #23
def run(test, params, env):
    """
    Test set_process_name parameter in qemu.conf.

    1) Change set_process_name in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if qemu command line changed accordingly;
    """
    def get_qemu_command_name_option(vm):
        """
        Get the name option of qemu command line of a libvirt VM.

        :param vm: A libvirt_vm.VM class instance.
        :return: A string containing the '-name' option of the VM's qemu
                 command line, or None on error.
        """
        if vm.is_dead():
            vm.start()

        # Get qemu command line.
        pid = vm.get_pid()
        res = process.run("ps -p %s -o cmd h" % pid, shell=True)

        if res.exit_status == 0:
            match = re.search(r'-name\s*(\S*)', res.stdout_text.strip())
            if match:
                return match.groups()[0]

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "name_not_set")
    set_process_name = params.get("set_process_name", "not_set")
    vm = env.get_vm(vm_name)

    # Get old qemu -name option.
    orig_qemu_name = get_qemu_command_name_option(vm)
    logging.debug('Original "-name" option of qemu command is '
                  '"%s".' % orig_qemu_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_process_name == 'not_set':
            del config.set_process_name
        else:
            config.set_process_name = set_process_name

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with set_process_name = '
                          '%s' % set_process_name)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with set_process_name = '
                      '%s' % set_process_name)

        # Restart VM to create a new qemu command line.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        # Get new qemu -name option.
        new_qemu_name = get_qemu_command_name_option(vm)
        logging.debug('New "-name" option of qemu command is '
                      '"%s"' % new_qemu_name)

        if ',process=qemu:%s' % vm_name in new_qemu_name:
            if expected_result == 'name_not_set':
                test.fail('Qemu name is not expected to be set, '
                          'but %s found' % new_qemu_name)
        else:
            if expected_result == 'name_set':
                test.fail('Qemu name is expected to be set, '
                          'but %s found' % new_qemu_name)
    finally:
        config.restore()
        libvirtd.restart()
Example #24
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write mode to disk by chmod g+w
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        process.run(cmd, ignore_status=False, shell=True)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake", test):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake", test):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(
                qemu_user, qemu_group)

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        test.fail("Label of VM processs is not "
                                  "expected after starting.\nDetail:"
                                  "vm_context=%s, sec_label_trans=%s" %
                                  (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                test.fail("Label of disk is not " +
                                          "expected" +
                                          " after VM starting.\n" +
                                          "Detail: disk_context" +
                                          "=%s" % disk_context +
                                          ", sec_label_trans=%s." %
                                          sec_label_trans)
            elif set_qemu_conf and not security_default_confined:
                if vm_context != qemu_conf_label_trans:
                    test.fail("Label of VM processs is not expected"
                              " after starting.\nDetail: vm_context="
                              "%s, qemu_conf_label_trans=%s" %
                              (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        test.fail("Label of disk is not expected " +
                                  "after VM starting.\nDetail: di" +
                                  "sk_context=%s, " % disk_context +
                                  "qemu_conf_label_trans=%s." %
                                  qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                if libvirt_version.version_compare(1, 3, 5):
                    chk_str = "-name guest=%s,process=qemu:%s" % (vm_name,
                                                                  vm_name)
                else:
                    chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = process.run(cmd, shell=True)
                if chk_str in result.stdout_text:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout_text))
                else:
                    test.fail("%s not in vm process command: %s" %
                              (chk_str, result.stdout_text))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when sec_relabel enabled.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
            elif set_qemu_conf and not set_sec_label:
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when only set qemu.conf.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
                else:
                    if (not img_label_after == img_label):
                        test.fail("Bug: Label of disk is changed\n"
                                  "Detail: img_label_after=%s, "
                                  "img_label=%s.\n" %
                                  (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start failed as expected, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf:
                                if qemu_conf_label_trans == "107:107":
                                    logging.debug(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                logging.debug(err_msg)
                else:
                    test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = process.run(cmd, ignore_status=True, shell=True)
        utils_selinux.set_status(backup_sestatus)


def run(test, params, env):
    """
    Test libvirt-guests service
    """
    def test_start_while_libvirtd_stopped():
        """
        Check the status of active libvirt-guests status while libvirtd is
        stopped.
        """
        logging.info("Stopping libvirtd and libvirt-guests services...")
        optr_dict = {libvirtd: 'stop', libvirt_guests: 'stop'}
        test_setup(optr_dict)

        logging.info("Starting libvirt-guests service...")
        libvirt_guests.start()

        logging.info("libvirtd and libvirt-guests should be running.")
        if not libvirtd.is_running():
            test.fail("libvird should be running.")
        if not libvirt_guests.status():
            test.fail("libvirt-guests should be running.")

    def test_stop_while_libvirtd_stopped():
        """
        Check the status of inactive libvirt-guests status while libvirtd is
        stopped.
        """
        logging.info("Starting libvirtd and libvirt-guests services...")
        optr_dict = {libvirtd: 'start', libvirt_guests: 'start'}
        test_setup(optr_dict)

        logging.info("Stopping libvirtd service...")
        libvirtd.stop()
        logging.info("Stopping libvirt-guests...")
        libvirt_guests.stop()
        if libvirt_guests.status():
            test.fail("libvirt-guests should be down.")

    def test_restart_libvirtd_with_running_vm():
        """
        Libvirt-guests should not be restarted automatically when libvirtd is
        restarted.
        """
        logging.info("Starting libvirtd and libvirt-guests...")
        optr_dict = {libvirtd: 'start', libvirt_guests: 'start'}
        test_setup(optr_dict)

        logging.info("Starting VM...")
        vm_id = get_non_init_dom_id(vm)
        org_guests_pid = get_libvirt_guests_pid(libvirt_guests)

        logging.info("Restarting libvirtd...")
        libvirtd.restart()
        act_guests_pid = get_libvirt_guests_pid(libvirt_guests)
        if org_guests_pid != act_guests_pid:
            test.fail("Pid of libvirt-guests changed from {} to {}."
                      .format(org_guests_pid, act_guests_pid))
        vm_id_act = vm.get_id()
        if vm_id != vm_id_act:
            test.fail("Domain id changed! Expected: {}, Acatual: {}."
                      .format(vm_id, vm_id_act))

    def test_setup(optr_dict):
        """
        Setup services based on optr_dict

        :param optr_dict: Test parameters, e.g. {libvirtd_obj: 'start'}
        """
        if not isinstance(optr_dict, dict):
            test.error("Incorrect 'optr_dict'! It must be a dict!")
        for serv, optr in optr_dict.items():
            if optr:
                if optr not in ['start', 'stop']:
                    test.error("Unknown service operation - %s!" % optr)
                getattr(serv, optr)()

    def get_non_init_dom_id(vm):
        """
        Prepare a VM with domain id not equal to 1

        :param vm: The VM object
        :return: VM's id
        """
        def _get_id():
            if vm.is_alive():
                vm.destroy()
            vm.start()
            vmid = vm.get_id()
            logging.debug("vm id: %s.", vmid)
            if vmid != '1':
                return vmid
            else:
                vm.destroy()

        vm_id = utils_misc.wait_for(_get_id, 120)
        if not vm_id:
            test.error("Unable to get the expected vm id!")
        return vm_id

    def get_libvirt_guests_pid(libvirt_guests):
        """
        Get pid of libvirt-guests

        :param libvirt_guests: The libvirt-guests object
        :return: libvirt-guests' pid
        """
        cmdRes = libvirt_guests.raw_status()
        if cmdRes.exit_status:
            test.fail("libvirt-guests is down!")
        res = re.search(r'Main PID: (\d+)', cmdRes.stdout)
        if not res:
            test.fail("Unable to get pid of libvirt-guests!")
        else:
            logging.debug("Pidof libvirt-guests: %s.", res[1])
        return res[1]

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    libvirt_guests = Factory.create_service("libvirt-guests")
    libvirtd = utils_libvirtd.Libvirtd('virtqemud')

    try:
        run_test()
    finally:
        logging.info("Recover test enviroment.")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if libvirt_guests.status():
            libvirt_guests.stop()
Example #26
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """
    def make_disk_snapshot(postfix_n, snapshot_take):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix appended to each snapshot name
        :param snapshot_take: number of snapshots to take
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: first disk of first device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = (
                "cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail(
                "The disk image path:%s doesn't include the origin image: %s" %
                (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption, since the image locking
    # feature was added in qemu 2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get(
        "backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({
                    'disk_source_name': replace_disk_image,
                    'disk_type': 'file',
                    'disk_src_protocol': 'file'
                })
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n, snapshot_take)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" % blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"
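        # At this point blockcommit_options is "--wait --verbose" plus any of
        # "--timeout 1", "--shallow" / "--base <img>", and "--top <img>" or
        # "--active [--pivot]" selected above.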

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # an inactive commit following an active commit will fail with bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                blockcommit_options = "  --wait --verbose --top vda[1] --base vda[2] --keep-relative"
                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            # Do block commit with --wait if top_inactive
            for count in range(1, block_commit_index):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        # Run test case
        # Active commit is not supported on rbd-based disks due to bug 1200726
        result = virsh.blockcommit(vm_name, blk_target, blockcommit_options,
                                   **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug(
                                    "after active block commit job "
                                    "ready for pivot, the target disk"
                                    " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name,
                                                        blk_target,
                                                        '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for the pivot after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s" %
                              disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name,
                                                  snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
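
For reference, a minimal standalone sketch (pure Python, with hypothetical image
paths) of how the expected backing chain is derived from the snapshot source
list for the "--shallow" and "--base" blockcommit variants checked above:

# Hypothetical snapshot source list, oldest image first.
snap_src_lst = ["/tmp/base.img", "/tmp/base.snap1", "/tmp/base.snap2",
                "/tmp/base.snap3", "/tmp/base.snap4"]
top_image = "/tmp/base.snap4"   # image being committed (top of the chain)
blk_source = "/tmp/base.img"    # commit target (base of the chain)

# The domain XML lists the chain top-down, so reverse the creation order.
chain_lst = snap_src_lst[::-1]

# --shallow: only the top image is merged into its immediate backing file,
# so the expected chain is simply the old chain without the top image.
shallow_chain = [img for img in chain_lst if img != top_image]

# --base: every image between the top image and the base (the base itself
# excluded) is merged away, so those entries disappear from the chain.
top_index = chain_lst.index(top_image)
base_index = chain_lst.index(blk_source)
base_chain = chain_lst[:top_index] + chain_lst[base_index:]

print(shallow_chain)  # snap3, snap2, snap1, base.img
print(base_chain)     # base.img only
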
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    # Backup the current network xml
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # Make sure there is an interface whose source network is 'default'
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref,
                                        extra,
                                        uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # Check that the libvirtd pid did not change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crashed after destroying the network!")
                    status = 1
                else:
                    logging.debug(
                        "libvirtd did not crash after destroying the network")
                    status = 0
                if check_libvirtd:
                    # Destroy the vm and check that the libvirtd pid did not
                    # change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crashed after destroying the vm!")
                        status = 1
                    else:
                        logging.debug(
                            "libvirtd did not crash after destroying the vm")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shut down after the transient network was "
                            "destroyed and libvirtd was restarted"
                        )
                    else:
                        status = 0

        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref,
                                   extra,
                                   uri=uri,
                                   readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug(
                    "transient network should not exist after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach new disk/detach disk.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh attach/detach-disk operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type to check, "disk" or "cdrom".
        :param os_type: VM's operating system type.
        :param target_name: Device target name.
        :param old_parts: Partition list collected before the attach.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = libvirt.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add the acpiphp module if the VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('Since qemu-kvm-rhev 2.9.0, QEMU image locking '
                         'prevents multiple runs of QEMU or qemu-img '
                         'while a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expected result is failure instead of success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logical_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if at_with_shareable:
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(test.tmpdir, device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logical_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the disk locking feature was introduced in libvirt 3.9.0, the
        # shareable mode must be set if the disk is attached multiple times
        if at_with_shareable or (test_twice
                                 and libvirt_version.version_compare(3, 9, 0)):
            s_at_options += ' --mode shareable'
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     s_at_options).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")

        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":

        # Since the disk locking feature was introduced in libvirt 3.9.0, the
        # shareable mode must be set if the disk is attached multiple times
        if test_twice and libvirt_version.version_compare(3, 9, 0):
            if not at_with_shareable:
                at_options += " --mode shareable"
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this has been fixed
    # (it used to be a bug): the change in the xml file is applied after the
    # guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need to wait a while for the xml to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(test.tmpdir, "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first #BZ892289
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logical_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device detached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
Beispiel #29
0
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image with the specified format.
    (3). Attach the disk to the vm.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set the volume xml attribute dictionary: extract all params starting
    # with 'vol_', which are for setting the volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs is not supported "
                                    "in the current version. Check more info "
                                    "with https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt, but
                # the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name,
                                       source=img_path,
                                       target="vdf",
                                       extra=extra,
                                       debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." %
                                        (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if disk_xml.source.attrs.has_key('file'):
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif disk_xml.source.attrs.has_key('name'):
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif (disk_xml.source.attrs.has_key('dev')
                          and disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for
                        # the block type. As a block device is treated as raw
                        # format by default, it is not fit as an external disk
                        # snapshot target. A workaround is to use qemu-img
                        # again with the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this "
                            "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(
                    vm_name, snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Snapshot creation succeeded in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, only do the revert
        # with internal snapshots and move all the skipped external cases
        # back to pass. Once external revert is also supported, move the
        # following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name,
                                                  snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        r"revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail(
                        "Revert command succeeded, but VM is not "
                        "paused after reverting with the --paused "
                        "option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now.
        # Only do this when snapshot creation succeeds, which is filtered
        # in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still "
                                                 "exists" % snap_xml_path)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name,
                                 pool_type,
                                 pool_target,
                                 emulated_image,
                                 source_name=vol_name)
            except error.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
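
For reference, a minimal standalone sketch (assuming an avocado-vt environment
and a running guest; the guest name and snapshot path below are hypothetical)
of the simplest external disk-only snapshot, expressed with snapshot-create-as
instead of the hand-built snapshot XML used in the example above:

from virttest import virsh

vm_name = "avocado-vt-vm1"    # hypothetical guest name
snap_file = "/tmp/vda.snap"   # hypothetical external snapshot target
options = ("snap1 --disk-only --atomic --no-metadata "
           "vda,snapshot=external,file=%s" % snap_file)

result = virsh.snapshot_create_as(vm_name, options, debug=True)
assert result.exit_status == 0, result.stderr
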
Beispiel #30
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According to the test type (only attach, or both attach and detach):
       a. Go on to test detach (if attaching is correct)
       b. Return GOOD or raise TestFail (if attaching is wrong)
    4) Check if attached interface is correct:
       a.Try to catch it in vm's XML file
       b.Try to catch it in vm
    5) Detach the attached interface
    6) Check result
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Attach must pass for the detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")
    readonly = ("yes" == params.get("readonly", "no"))

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mode = params.get("at_detach_iface_mode", "vepa")
    iface_mac = params.get("at_detach_iface_mac", "created")
    iface_target = params.get("at_detach_iface_target")
    iface_model = params.get("at_detach_iface_model")
    iface_inbound = params.get("at_detach_iface_inbound")
    iface_outbound = params.get("at_detach_iface_outbound")
    iface_rom = params.get("at_detach_rom_bar")
    iface_link = params.get("at_detach_link_state")
    iface_boot = params.get("at_detach_boot_order")
    iface_driver = params.get("at_detach_iface_driver")
    iface_driver_host = params.get("at_detach_driver_host")
    iface_driver_guest = params.get("at_detach_driver_guest")
    iface_backend = params.get("at_detach_iface_backend")

    save_restore = params.get("save_restore", "no")
    restart_libvirtd = params.get("restart_libvirtd", "no")
    attach_cmd = params.get("attach_cmd", "attach-interface")
    virsh_dargs = {'ignore_status': True, 'debug': True, 'uri': uri}
    validate_xml_result = "yes" == params.get("check_xml_result", "no")
    paused_after_vm_start = "yes" == params.get("paused_after_vm_start", "no")
    machine_type = params.get("machine_type")

    # Get iface name if iface_type is direct
    if iface_type == "direct":
        iface_source = utils_net.get_net_if(state="UP")[0]
    # Get a bridge name for the test if iface_type is bridge.
    # If there is no bridge other than virbr0, create one (br0)
    # for the test.
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            pass  # If there is no virbr0, just pass
        logging.debug("Useful bridges:%s", bridge_list)
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            process.run('ip link add name br0 type bridge',
                        ignore_status=False)
            iface_source = 'br0'
            logging.debug("Added bridge br0")

    # Test both attach and detach, so collect info
    # for both of them for the result check.
    # When something is wrong with the interface, set fail_flag to 1.
    fail_flag = 0
    result_info = []

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    names = locals()
    iface_format = get_formatted_iface_dict(names, params.get("vm_arch_name"))

    # for rtl8139 model, need to add pcie bridge
    if iface_model == "rtl8139" and machine_type == "q35":
        add_pcie_controller(vm_name)
        if start_vm == "yes" and not vm.is_alive():
            vm.start()

    try:
        # Generate xml file if using attach-device command
        if attach_cmd == "attach-device":
            # Change boot order to disk
            libvirt.change_boot_order(vm_name, "disk", "1")
            vm.destroy()
            vm.start()
            # Generate attached xml
            new_iface = Interface(type_name=iface_type)
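            # For the multiqueue/multi_options cases the interface source
            # has to be passed to modify_vm_iface() as a dict keyed by the
            # interface type.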
            if any(x in params['name']
                   for x in ('multiqueue', 'multi_options')):
                tmp_iface_format = iface_format.copy()
                tmp_iface_format.update({
                    'source':
                    "{'%s': '%s'}" % (iface_type, iface_format['source'])
                })
                xml_file_tmp = libvirt.modify_vm_iface(vm_name, "get_xml",
                                                       tmp_iface_format)
            else:
                xml_file_tmp = libvirt.modify_vm_iface(vm_name, "get_xml",
                                                       iface_format)
            new_iface.xml = xml_file_tmp
            new_iface.del_address()
            xml_file = new_iface.xml

        # Confirm the VM's state and make sure the OS has fully started.
        if start_vm == "no":
            if vm.is_alive():
                vm.destroy()
        else:
            vm.wait_for_login().close()

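        # Optionally pause the guest before attaching the interface.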
        if paused_after_vm_start:
            vm.pause()

        # Set attach-interface domain
        dom_uuid = vm.get_uuid()
        dom_id = vm.get_id()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = dom_id
        elif vm_ref == "domuuid":
            vm_ref = dom_uuid
        elif vm_ref == "hexdomid" and dom_id is not None:
            vm_ref = hex(int(dom_id))

        # Set attach-interface options and start the attach-interface test.
        if correct_attach:
            options = set_options("network", "default", iface_mac, "",
                                  "attach", None, iface_model)
            if readonly:
                virsh_dargs.update({'readonly': True, 'debug': True})
            attach_result = virsh.attach_interface(vm_name, options,
                                                   **virsh_dargs)
        else:
            if attach_cmd == "attach-interface":
                options = set_options(iface_type, iface_source, iface_mac,
                                      options_suffix, "attach", iface_target,
                                      iface_model, iface_inbound,
                                      iface_outbound)
                attach_result = virsh.attach_interface(vm_ref, options,
                                                       **virsh_dargs)
            elif attach_cmd == "attach-device":
                attach_result = virsh.attach_device(vm_name,
                                                    xml_file,
                                                    ignore_status=True,
                                                    debug=True)
        attach_status = attach_result.exit_status
        logging.debug(attach_result)

        # If attach interface failed.
        if attach_status:
            if not status_error:
                fail_flag = 1
                result_info.append("Attach Failed: %s" % attach_result.stderr)
            elif status_error:
                # The flag is only used to exit early here; it does not mean the test failed.
                fail_flag = 1
        # If attach interface succeeded.
        else:
            if status_error and not correct_attach:
                fail_flag = 1
                result_info.append("Attach Success with wrong command.")

        if fail_flag and start_vm == "yes":
            vm.destroy()
            if result_info:
                test.fail(result_info)
            else:
                # Exit because this is a negative test for attach-interface.
                return

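        # With the --print-xml option attach-interface only prints the device
        # xml instead of attaching it, so verify that the printed elements
        # match the requested options.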
        if "print-xml" in options_suffix:
            iface_obj = Interface(type_name=iface_type)
            iface_obj.xml = attach_result.stdout.strip()
            source_type = iface_type if iface_type == 'bridge' else 'dev'
            if (iface_obj.type_name == iface_type
                    and iface_obj.source.get(source_type) == iface_source
                    and iface_obj.target.get('dev') == iface_target
                    and iface_obj.model == iface_model
                    and iface_obj.bandwidth.inbound == eval(
                        iface_format['inbound'])
                    and iface_obj.bandwidth.outbound == eval(
                        iface_format['outbound'])
                    and iface_obj.mac_address == iface_mac):
                logging.info("Print ml all element check pass")
            else:
                test.fail("Print xml do not show as expected")

        # Check the dumpxml output to see whether the interface was added successfully.
        status, ret = check_dumpxml_iface(vm_name, iface_format)
        if "print-xml" not in options_suffix:
            # Check the validate_xml_result flag to decide whether to apply check_interface_xml.
            if validate_xml_result:
                # If options_suffix contains --config, the inactive xml needs to be dumped.
                is_active = True
                if "config" in options_suffix:
                    is_active = False
                # Check dumping VM xml value.
                if not check_interface_xml(vm_name, iface_type, iface_source,
                                           iface_mac, is_active):
                    test.fail(
                        "Failed to find matched interface values in VM interface xml"
                    )
            if status:
                fail_flag = 1
                result_info.append(ret)
        else:
            if status == 0:
                test.fail(
                    "Attach interface took effect in the xml although "
                    "the print-xml option was used")
            else:
                return

        # Log in to the domain to check the new interface.
        if not vm.is_alive():
            vm.start()
        elif vm.state() == "paused":
            vm.resume()
        vm.wait_for_login().close()

        status, ret = login_to_check(vm, iface_mac)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Check on host for direct type
        if iface_type == 'direct':
            cmd_result = process.run(
                "ip -d link show %s" % iface_target).stdout_text.strip()
            logging.info("cmd output is %s", cmd_result)
            check_pattern = (
                "%s@%s.*\n.*%s.*\n.*macvtap.*mode.*%s" %
                (iface_target, iface_source, iface_mac, iface_mode))
            logging.info("check pattern is %s", check_pattern)
            if not re.search(check_pattern, cmd_result):
                logging.error("Cannot find %s in ip link output", check_pattern)
                fail_flag = 1
                result_info.append(cmd_result)

        # Do operation and check again
        if restart_libvirtd == "yes":
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if save_restore == "yes":
            check_save_restore(vm_name)

        status, ret = check_dumpxml_iface(vm_name, iface_format)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Set detach-interface options
        options = set_options(iface_type, None, iface_mac, options_suffix,
                              "detach")

        # Start detach-interface test
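        # The domain id changes after save/restore, so fall back to the
        # domain name in that case.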
        if save_restore == "yes" and vm_ref == dom_id:
            vm_ref = vm_name
        detach_result = virsh.detach_interface(vm_ref,
                                               options,
                                               wait_remove_event=True,
                                               **virsh_dargs)
        detach_status = detach_result.exit_status
        detach_msg = detach_result.stderr.strip()

        logging.debug(detach_result)

        if detach_status == 0 and not status_error:
            # If the command was run with the --config parameter, skip the checks below.
            if "config" in options_suffix:
                return
            # Check the xml after detach and clean up if needed.
            time.sleep(5)
            status, _ = check_dumpxml_iface(vm_name, iface_format)
            if status == 0:
                detach_status = 1
                detach_msg = "xml still exist after detach"
                cleanup_options = "--type %s --mac %s" % (iface_type,
                                                          iface_mac)
                virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)
            else:
                logging.info("After detach, the interface xml disappeared")

        # Check results.
        if status_error:
            if detach_status == 0:
                test.fail("Detach Success with wrong command.")
        else:
            if detach_status != 0:
                test.fail("Detach Failed: %s" % detach_msg)
            else:
                if fail_flag:
                    test.fail("Attach-Detach Success but "
                              "something wrong with its "
                              "functional use:%s" % result_info)
    finally:
        if vm.is_alive():
            vm.destroy()
        backup_xml.sync()