Code example #1
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
        snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        elif test_disk_readonly:
            if libvirt_version.version_compare(6, 0, 0):
                libvirt.check_result(ret)
            else:
                libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
Code example #2
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
Code example #3
    def check_snapshot(snap_option):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(test.tmpdir, "rbd.mem")
        snap_disk = os.path.join(test.tmpdir, "rbd.disk")
        xml_snap_exp = ["disk name='vda' snapshot='external' type='file'"]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec vda,file=%s --disk-only" %
                       (snap_name, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec vda,file="
                       "%s" % (snap_name, snap_mem, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                raise exceptions.TestFail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                raise exceptions.TestFail("Failed to check domain xml")
Code example #4
def run(test, params, env):
    """
    Test virsh blockcopy with various options on a transient guest.

    1.Prepare backend storage (iscsi)
    2.Start VM
    3.Execute virsh blockcopy target command
    4.Check status after operation accomplished
    5.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_file_backend_env(params):
        """
        Set up the file-backed disk test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        type_name = params.get("virt_disk_device_type")
        disk_device = params.get("virt_disk_device")
        device_target = params.get("virt_disk_device_target")
        device_bus = params.get("virt_disk_device_bus")
        device_format = params.get("virt_disk_device_format")
        blockcopy_image_name = params.get("blockcopy_image_name")
        emulated_size = int(params.get("emulated_size", "2"))

        libvirt.create_local_disk("file", blockcopy_image_name, emulated_size,
                                  "qcow2")

        disk_src_dict = {"attrs": {"file": blockcopy_image_name}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("guest xml after undefined and recreated:\n%s",
                      file_disk)
        return file_disk

    def start_pivot_blkcpy_on_transient_vm():
        """
        Start blockcopy with pivot option
        """
        external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
            vm, device_target, "trans_snapshot", snapshot_take)
        logging.debug("external snapshots:%s\n", external_snapshot_disks)
        external_snapshot_disks.pop()
        for sub_option in ["--shallow --pivot", "--pivot"]:
            tmp_copy_path = os.path.join(
                data_dir.get_data_dir(),
                "%s_%s.img" % (vm_name, sub_option[2:5]))
            tmp_blkcopy_path.append(tmp_copy_path)
            if os.path.exists(tmp_copy_path):
                libvirt.delete_local_disk('file', tmp_copy_path)
            virsh.blockcopy(vm_name,
                            device_target,
                            tmp_copy_path,
                            options=sub_option,
                            ignore_status=False,
                            debug=True)
            back_chain_files = libvirt_disk.get_chain_backing_files(
                tmp_copy_path)
            back_chain_files = back_chain_files[1:]
            logging.debug("expected backing chain: %s, actual backing chain: %s\n",
                          external_snapshot_disks, back_chain_files)
            if back_chain_files != external_snapshot_disks:
                test.fail("Cannot get identical backing chain")
            utils_misc.wait_for(
                lambda: libvirt.check_blockjob(vm_name, device_target), 5)
            # After pivot, no backing chain exists
            external_snapshot_disks = []

    def check_info_in_libvirtd_log_file(matchedMsg=None):
        """
        Check if information can be found in the libvirtd log.

        :param matchedMsg: expected message to match
        """
        # Check libvirtd log file.
        libvirtd_log_file = log_config_path
        if not os.path.exists(libvirtd_log_file):
            test.fail("Expected libvirtd log file %s does not exist" %
                      libvirtd_log_file)
        cmd = ("grep -nr '%s' %s" % (matchedMsg, libvirtd_log_file))
        return process.run(cmd, ignore_status=True,
                           shell=True).exit_status == 0

    def check_bandwidth_progress(bandwidth_value):
        """
        Check bandwidth

        :param bandwidth_value: expected bandwidth value
        """
        ret = utils_misc.wait_for(
            lambda: libvirt.check_blockjob(vm_name, device_target, "bandwidth",
                                           bandwidth_value), 30)
        if not ret:
            test.fail("Failed to get bandwidth limit output")

    def _extend_blkcpy_execution(sub_option,
                                 sub_status_error,
                                 pre_created=False):
        """
        Wrap up blockcopy execution combining with various options

        :params sub_option: option
        :params sub_status_error: expected error or not
        :params pre_created: whether pre-created
        """
        tmp_copy_path = os.path.join(data_dir.get_data_dir(),
                                     "%s_%s.img" % (vm_name, sub_option))
        if os.path.exists(tmp_copy_path):
            libvirt.delete_local_disk('file', tmp_copy_path)
        if pre_created:
            libvirt.create_local_disk('file', tmp_copy_path, '10M', 'qcow2')
        tmp_option = params.get("options") % sub_option
        if "default" in tmp_option:
            tmp_option = " --wait --verbose"
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 tmp_copy_path,
                                 options=tmp_option,
                                 ignore_status=True,
                                 debug=True)
        logging.debug("expect blockcopy failure: %s", sub_status_error)
        libvirt.check_exit_status(result, expect_error=sub_status_error)

    def start_granularity_blkcpy_on_transient_vm():
        """Start blockcopy with granularity operations """
        granularity_value = params.get('granularity_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(granularity_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error:
                # Check whether the granularity keyword appears in the log
                result = utils_misc.wait_for(
                    lambda: check_info_in_libvirtd_log_file(
                        '"granularity":%s' % sub_option),
                    timeout=20)
                if not result:
                    test.fail(
                        "Failed to get expected messages from log file: %s." %
                        log_config_path)

            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_bandwidth_blkcpy_on_transient_vm():
        """Start blockcopy with bandwidth operations """
        bandwidth_value = params.get('bandwidth_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(bandwidth_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error and 'default' not in sub_option:
                check_bandwidth_progress(sub_option)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_timeout_blkcpy_on_transient_vm():
        """Start blockcopy with timeout operations """
        timeout_value = params.get('timeout_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(timeout_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_bufsize_blkcpy_on_transient_vm():
        """Start blockcopy with buffer size operations """
        bufsize_value = params.get('bufsize_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(bufsize_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error:
                # Check whether the buf-size keyword appears in the log
                result = utils_misc.wait_for(
                    lambda: check_info_in_libvirtd_log_file('buf-size'),
                    timeout=20)
                if not result:
                    test.fail(
                        "Failed to get expected messages from log file: %s." %
                        log_config_path)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_reuse_external_blkcpy_on_transient_vm():
        """Start reuse external blockcopy operations """
        reuse_external_value = params.get('reuse-external_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(reuse_external_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option,
                                     sub_status_error,
                                     pre_created=True)
            if sub_status_error:
                # Check block job information
                job_result = virsh.blockjob(
                    vm_name, device_target, '--info',
                    ignore_status=True).stdout_text.strip()
                if 'No current block job for' not in job_result:
                    test.fail("Unexpected active block job found")
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    # Disk specific attributes.
    device_target = params.get("virt_disk_device_target", "vdd")
    blockcopy_option = params.get("blockcopy_option")
    backend_storage_type = params.get("backend_storage_type")
    device_type = params.get("virt_disk_device_type")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))

    # Configure libvirtd log path
    log_config_path = params.get("libvirtd_debug_file",
                                 "/var/log/libvirt/libvirtd.log")

    # Additional disk images.
    tmp_blkcopy_path = []
    external_snapshot_disks = []
    attach_disk_xml = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Setup backend storage
        if backend_storage_type == "file":
            attach_disk_xml = setup_file_backend_env(params)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("disk xml is:\n%s" % attach_disk_xml)
        # Sync VM xml.
        if attach_disk_xml is None:
            test.fail("Fail to create attached disk xml")
        else:
            vmxml.add_device(attach_disk_xml)
            vmxml.sync()
        try:
            # Create a transient VM
            transient_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            virsh.undefine(vm_name, debug=True, ignore_status=False)
            virsh.create(transient_vmxml.xml, ignore_status=False, debug=True)
            vm.wait_for_login().close()
            debug_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("guest xml after undefined and recreated:%s\n",
                          debug_xml)
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if blockcopy_option in ['pivot_shadow']:
            start_pivot_blkcpy_on_transient_vm()

        if blockcopy_option in ['granularity']:
            start_granularity_blkcpy_on_transient_vm()

        if blockcopy_option in ['bandwidth']:
            start_bandwidth_blkcpy_on_transient_vm()

        if blockcopy_option in ['timeout']:
            start_timeout_blkcpy_on_transient_vm()

        if blockcopy_option in ['buf_size']:
            start_bufsize_blkcpy_on_transient_vm()

        if blockcopy_option in ['reuse_external']:
            start_reuse_external_blkcpy_on_transient_vm()
    finally:
        if virsh.domain_exists(vm_name):
            # Clean up snapshots and restore VM
            try:
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            finally:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                virsh.define(vmxml_backup.xml, debug=True)
        vmxml_backup.sync()
        # Clean up backend storage
        for tmp_path in tmp_blkcopy_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
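The granularity, bandwidth, timeout, buf-size and reuse-external helpers above all share one driving pattern: a space-separated value list from the test parameters is zipped with a space-separated yes/no error list, and each pair drives one blockcopy run together with its expected outcome. A minimal sketch with assumed parameter values (the concrete numbers are purely illustrative):

    # Assumed example values; the real ones come from the test's cfg params.
    params = {'bandwidth_value': '1000 2000 default',
              'option_status_error': 'no no yes'}

    bandwidth_value = params.get('bandwidth_value').split()
    option_status_error = [value == 'yes'
                           for value in params.get('option_status_error').split()]

    for sub_option, sub_status_error in zip(bandwidth_value, option_status_error):
        # Each pair maps to one blockcopy invocation and its expected result.
        print(sub_option, "expect failure" if sub_status_error else "expect success")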
Code example #5
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_rng_xml(dparams, sync=True):
        """
        Modify rng device xml options
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        backend_protocol = dparams.get("backend_protocol")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [ast.literal_eval(source) for source in
                           backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend

        logging.debug("Rng xml: %s", rng_xml)
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", "\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device virtio-rng-pci'")
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Cann't see rng option"
                      " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" %
                          ret.stdout)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest(session):
        """
        Check random device on guest
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=600).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=600).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        ret, output = session.cmd_status_output(cmd, timeout=600)
        if ret:
            test.fail("Failed to read the random device")
        rng_rate = params.get("rng_rate")
        if rng_rate:
            rate_bytes, rate_period = ast.literal_eval(rng_rate).values()
            rate_conf = float(rate_bytes) / (float(rate_period)/1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s",
                            output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.skip("Multiple virtio-rng devices are not"
                          " supported on this guest kernel. "
                          "Bug: https://bugzilla.redhat.com/"
                          "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=120):
                test.fail("Failed to read the random device")

    start_error = "yes" == params.get("start_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running",
                                              "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.skip("Multiple virtio-rng devices not "
                  "supported on this libvirt version")
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Try to install rng-tools on host, it can speed up random rate
    # if installation failed, ignore the error and continue the test
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s"
                        % rngd_srv_conf, shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None
        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in range(device_num):
                dparams[i] = {"rng_model": params.get(
                    "rng_model_%s" % i, "virtio")}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            modify_rng_xml(params, not test_snapshot)

        try:
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)

            # Start the VM.
            vm.start()
            if start_error:
                test.fail("VM started unexpectedly")

            if test_qemu_cmd:
                if device_num > 1:
                    for i in range(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            session.close()

            if test_snapshot:
                check_snapshot(bgjob)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
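For reference, a small standalone reconstruction of the rate check performed in check_guest() above: the configured ceiling is rate bytes per (period in milliseconds), the observed rate is taken from the dd summary line, and the test tolerates up to 20% overshoot. The rate values and the dd output line below are assumed samples, not captured output:

    import re

    rng_rate = {'bytes': 5000, 'period': 1000}   # assumed: 5000 bytes per 1000 ms
    # Hypothetical summary line from "dd if=/dev/hwrng of=rng.test count=100".
    output = "51200 bytes (51 kB) copied, 10.3 s, 5.0 kB/s"

    rate_conf = float(rng_rate['bytes']) / (float(rng_rate['period']) / 1000)
    match = re.search(r"(\d+) bytes.*copied, (\d+\.\d+) s", output)
    rate_real = float(match.group(1)) / float(match.group(2))
    print(rate_real, rate_conf)          # ~4970.9 bytes/s vs 5000.0 bytes/s
    print(rate_real > rate_conf * 1.2)   # False -> within the configured limit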
Code example #6
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --disk-only --diskspec vda,"
            "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --memspec file=%s,snapshot=external"
            " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' " "in command line" % cmd)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(
                    3, 9, 0):
                test.cancel(
                    "place auth in source is not supported in current libvirt version"
                )
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=iscsi_host)

        # If we use qcow2 disk format, should format iscsi disk first.
        if device_format == "qcow2":
            cmd = (
                "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" %
                (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type device, the iothread attribute needs to be set in the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_uuid": uuid
            }
        elif auth_usage:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_usage": secret_usage_target
            }
        disk_source = disk_xml.new_disk_source(
            **{
                "attrs": {
                    "protocol": "iscsi",
                    "name": "%s/%s" % (iscsi_target, lun_num)
                },
                "hosts": [{
                    "name": iscsi_host,
                    "port": iscsi_port
                }]
            })
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type device need use virtio-scsi
        # instead of virtio, so additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update(
                        {addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, need add iothread attribute in controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start." "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
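The secret UUID extraction in the example above relies on a positional regular expression over the virsh secret-define output rather than a dedicated parser. A quick standalone check of that expression (the sample output line and UUID are made up for illustration):

    import re

    # Representative "virsh secret-define" output; the UUID is invented.
    stdout = "Secret 3a27eea8-27a2-4b72-9b3b-6e1a33b7e7e9 created"

    secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", stdout.strip())[0].lstrip()
    print(secret_uuid)   # -> 3a27eea8-27a2-4b72-9b3b-6e1a33b7e7e9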
Code example #7
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2.Start VM
    3.Set domblkthreshold on target device in VM
    4.Trigger one threshold event
    5.Check threshold event is received as expected
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM name
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            time.sleep(10)
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        result = virsh.blockcommit(vm_name, target,
                                   blockcommit_options, ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM name
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds so the event-waiting code in the main thread starts first
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return mirror source index in integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up any dirty secrets left over in the test environment.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank params to confirm if delete the configure at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image that will be converted to the remote rbd storage.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir based pool, and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # Creating a luks encrypted volume is not supported on libvirt
                # versions lower than 2.5.0, so cancel the test in that case.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = "create: invalid option"
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume. "
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            # disk_encryption_dict was already prepared in the luks branch above
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Add the new disk device to the VM xml, except in the
        # blockcommit/blockcopy mirror cases which operate on the existing
        # 'vda' disk instead
        if not mirror_mode_blockcommit and not mirror_mode_blockcopy:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before setting the block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                # Do transient blockcopy in the background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, debug=True)
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, debug=True)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph config file if it was created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.info("Clean up nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
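
The example above drives the block-threshold workflow through avocado-vt helpers (set_vm_block_domblkthreshold, check_threshold_event and the trigger_* threads). A rough stand-alone sketch of the same idea, using only the plain virsh CLI and hypothetical placeholder values for the domain name, target device and threshold, could look like this:

import subprocess

# Hypothetical placeholders; adjust to an existing running domain and disk.
vm_name = "avocado-vt-vm1"
target_dev = "vdb"
threshold_bytes = 100 * 1024 * 1024   # fire once ~100 MiB have been written

# Register the write threshold on the disk (requires libvirt >= 3.2.0).
subprocess.run(["virsh", "domblkthreshold", vm_name, target_dev,
                str(threshold_bytes)], check=True)

# Wait (up to 60s) for libvirt to emit the block-threshold event once the
# guest writes past the threshold.
event = subprocess.run(["virsh", "event", vm_name, "--event", "block-threshold",
                        "--timeout", "60"], capture_output=True, text=True)
print(event.stdout.strip())

In the test itself, the guest writes that push the disk past the threshold are produced by trigger_block_threshold_event() running in a background thread.
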
コード例 #8
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (plus an iscsi pool/volume or LVM block device when needed)
    3. Create an iscsi network/volume/block disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to satisfy repeated disk attach operations
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # The iscsi-direct pool doesn't take a source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to become stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list,
            # snapshot-info, snapshot-dumpxml, snapshot-create.
            # virsh snapshot-revert is not supported on combined internal and
            # external snapshots; see more details at
            # https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
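
The CHAP branch above builds the secret with SecretXML and the virsh wrappers. As a minimal stand-alone sketch of the same secret handling, assuming the plain virsh CLI and hypothetical credentials, the secret can be defined and its value set like this:

import base64
import subprocess
import tempfile

# Hypothetical CHAP password and usage target; adjust to the iscsi target setup.
chap_passwd = "password"
secret_usage_target = "libvirtiscsi"

secret_xml = """<secret ephemeral='no' private='yes'>
  <description>iSCSI CHAP secret</description>
  <usage type='iscsi'>
    <target>%s</target>
  </usage>
</secret>""" % secret_usage_target

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as xml_file:
    xml_file.write(secret_xml)
    xml_path = xml_file.name

# "virsh secret-define" prints "Secret <uuid> created"; keep the uuid.
result = subprocess.run(["virsh", "secret-define", xml_path],
                        capture_output=True, text=True, check=True)
secret_uuid = result.stdout.split()[1]

# Store the base64-encoded CHAP password as the secret value.
encoded = base64.b64encode(chap_passwd.encode()).decode()
subprocess.run(["virsh", "secret-set-value", secret_uuid, encoded], check=True)

The usage target (or uuid) is then referenced from the disk XML's <auth>/<secret> element, which is roughly what the disk_params_auth dictionary above expands into.
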
コード例 #9
0
                ret = virsh.detach_device(vm_name,
                                          disks_xml[i].xml,
                                          flagstr=attach_option)
                os.remove(disks_xml[i].xml)
                libvirt.check_exit_status(ret)

            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices, device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        virsh.undefine(vm_name)
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)

        # Delete tmp files/disks.
        if qemu_conf_bak:
            shutil.copy(qemu_conf_bak, "/etc/libvirt/qemu.conf")
            os.remove(qemu_conf_bak)
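
check_vm_partitions() used in this fragment is defined earlier in the original test and is not included here. A simplified, hypothetical helper with the same purpose, assuming an avocado-vt login session like the one used by find_attach_disk() in code example #8, might look like this:

def check_vm_partitions(session, device_targets, expected=True):
    """
    Check whether each target device (e.g. 'vdb') is visible in the
    guest's /proc/partitions; return True when all match `expected`.
    """
    for target in device_targets:
        status, _ = session.cmd_status_output(
            "grep %s /proc/partitions" % target)
        if (status == 0) != expected:
            return False
    return True
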
コード例 #10
0
def run(test, params, env):
    """
    Test memory device (dimm) hotplug and hot-unplug.

    1. Prepare the test environment and destroy the VM if it is running.
    2. Modify the domain xml (maxMemory slots, NUMA cells, hugepages) as configured.
    3. Attach/detach memory devices and start the domain.
    4. Check the domain xml, the qemu command line and the guest meminfo.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def get_vm_memtotal(session):
        """
        Get guest total memory
        """
        proc_meminfo = session.cmd_output("cat /proc/meminfo")
        # verify format and units are expected
        return int(
            re.search(r'MemTotal:\s+(\d+)\s+[kK]B', proc_meminfo).group(1))

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file on it;
        # this is what actually consumes the guest memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd_str = 'memdimm.\|memory-backend-ram,id=ram-node.'
            cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                    (cmd_str, size))
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node
                                and node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        process.run(cmd, shell=True)

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be brought online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(lambda: get_vm_memtotal(session) != int(old_mem),
                            20,
                            first=15.0)
        new_mem = get_vm_memtotal(session)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            # The target node is only set when NUMA cells are configured;
            # non-NUMA guests are supported as well
            if numa_cells:
                tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(**{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(len(cells)):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    known_unplug_errors = []
    if params.get("known_unplug_errors"):
        known_unplug_errors.append(params.get("known_unplug_errors"))

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = get_vm_memtotal(session)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            for x in xrange(at_times):
                # If an error is expected, only check the command error status
                # on the last attach attempt
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            for x in xrange(at_times):
                ret = virsh.detach_device(vm_name,
                                          dev_xml.xml,
                                          flagstr=attach_option)
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is used by vm, and it could not be unplugged
                            # The result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured, while hot unplug"
                                ": %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
Code example #11
File: libvirt_mem.py  Project: CongLi/tp-libvirt
def run(test, params, env):
    """
    Test memory device (dimm) attach and detach.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare the memory device xml.
    3. Edit the domain xml and start the domain.
    4. Perform attach/detach and check operations.
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def get_vm_memtotal(session):
        """
        Get guest total memory
        """
        proc_meminfo = session.cmd_output("cat /proc/meminfo")
        # Parse MemTotal (in kB) from /proc/meminfo
        return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                             proc_meminfo).group(1))

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file to it;
        # this is the memory-consuming operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s"
                    % size)
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        utils.run(cmd)

    def check_guest_meminfo(old_mem):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: get_vm_memtotal(session) != int(old_mem), 5)
        new_mem = get_vm_memtotal(session)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        if new_mem != int(old_mem) + int(tg_size):
            raise error.TestFail("Total memory on guest couldn't"
                                 " changed after attach memory "
                                 "device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                if len(mem_dev) != 1:
                    raise error.TestFail("Found wrong number of"
                                         " memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                assert int(new_max_mem) - int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils.log_last_traceback()
            raise error.TestFail("Found unmatched memory setting"
                                 " from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(
                **{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
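                # Delete memory and currentMemory tags; libvirt recomputes them from the NUMA cells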
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    #cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Drop caches first so the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = get_vm_memtotal(session)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            for x in xrange(at_times):
                # If an error is expected, the command exit status should be
                # checked on the last iteration
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise error.TestFail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError:
                if start_error:
                    pass
                else:
                    raise error.TestFail("VM Failed to start"
                                         " for some reason!")

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after starting the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total)

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                raise error.TestFail("Numa memory can't be consumed"
                                     " on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            ret = virsh.detach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, detach_error)
            if test_dom_xml:
                check_dom_xml(dt_mem=detach_device)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
Code example #12
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of virt-xml-validate command.
    try:
        VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")
    except ValueError:
        raise error.TestNAError("Not find virt-xml-validate command on host.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    net_name = params.get("net_dumpxml_name", "default")
    pool_name = params.get("pool_dumpxml_name", "default")
    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    valid_schemas = [
        "domain",
        "domainsnapshot",
        "network",
        "storagepool",
        "storagevol",
        "nodedev",
        "capability",
        "nwfilter",
        "secret",
        "interface",
    ]
    if schema not in valid_schemas:
        raise error.TestFail("invalid %s specified" % schema)

    virsh_dargs = {"ignore_status": True, "debug": True}
    if schema == "domainsnapshot":
        domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs)
    elif schema == "network":
        network_validate(net_name, file=output_path, **virsh_dargs)
    elif schema == "storagepool":
        storagepool_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "storagevol":
        storagevol_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "nodedev":
        nodedev_validate(file=output_path, **virsh_dargs)
    elif schema == "capability":
        capability_validate(file=output_path, **virsh_dargs)
    elif schema == "nwfilter":
        nwfilter_validate(file=output_path, **virsh_dargs)
    elif schema == "secret":
        secret_validate(file=output_path, **virsh_dargs)
    elif schema == "interface":
        interface_validate(file=output_path, **virsh_dargs)
    else:
        # domain
        virsh.dumpxml(vm_name, to_file=output_path)

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)

    # Delete snapshots.
    snapshot_lists = virsh.snapshot_list(vm_name)
    if len(snapshot_lists) > 0:
        libvirt.clean_up_snapshots(vm_name, snapshot_lists)
        for snapshot in snapshot_lists:
            virsh.snapshot_delete(vm_name, snapshot, "--metadata")
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n" "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("xml fails to validate\n" "Detail: %s." % cmd_result)
Code example #13
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the Ceph monitor hosts to the ceph config file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = '\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
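        # Substrings expected in the snapshot XML and the domain XML after the snapshot is taken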
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
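        # Expected <mirror> element in the domain XML while the copy job is running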
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
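            # Treat the copy as complete once the progress line reports 100 %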
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before the disk was attached.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

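            # Mount the new partition and try to create a file on it to verify read/write access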
            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Read-only fs: check the error messages.
            # The command may succeed, but read-only
            # messages can be found in the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
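                # The snapshot name is the second column of the 'rbd snap list' output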
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge volume,and then delete volume.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create the /etc/ceph/ceph.conf file to suppress spurious warning messages.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of unsupported error messages used to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install ceph-common package which include rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

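                # Parse the secret UUID from the 'virsh secret-define' output ("Secret <uuid> created")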
                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
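        # Build the rbd path used by qemu-img convert and virsh attach-disk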
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make an FS on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start VM first, and then wait for the OS to boot.
        # Otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # No need auth options for volume
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the VM is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start."
                      "Error: %s" % str(details))
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
コード例 #14
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only --diskspec vda,"
                                       "file=/tmp/testvm-snap1"
                                       % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       " --diskspec vda,file=/tmp/testvm-snap2"
                                       % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Partition list in the VM before the disk was attached.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' "
                      "in command line" % cmd)

    def check_auth_plaintext(vm_name, password):
        """
        Check if libvirt passed the plaintext of the chap authentication
        password to qemu.
        :param vm_name: The name of vm to be checked.
        :param password: The plaintext of password used for chap authentication.
        :return: True if using plaintext, False if not.
        """
        cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s"
               % (vm_name, password))
        return process.system(cmd, ignore_status=True, shell=True) == 0

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in current libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)
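            # For reference, the two wrapper calls above roughly correspond to
            # these virsh commands (file name and uuid are illustrative only):
            #   virsh secret-define iscsi-secret.xml
            #   virsh secret-set-value <uuid> <base64-encoded password>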

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=iscsi_host)

        # If we use the qcow2 disk format, the iscsi disk should be formatted first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
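            # The command above expands to (placeholders shown):
            #   qemu-img create -f qcow2 iscsi://<host>:<port>/<target_iqn>/<lun> <size>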
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For a lun type device, the iothread attribute needs to be set in the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a fake uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, a lun type device needs to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is set, add the iothread attribute to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start."
                          "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)
            # Test that libvirt doesn't pass the plaintext chap password to qemu;
            # this behavior is implemented since libvirt 4.3.0-1.
            if (libvirt_version.version_compare(4, 3, 0) and
                    (auth_uuid or auth_usage) and
                    chap_passwd):
                if check_auth_plaintext(vm_name, chap_passwd):
                    test.fail("Libvirt should not pass plaintext of chap "
                              "password to qemu-kvm.")

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
コード例 #15
0
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix for snapshot names.
        :param snapshot_take: number of snapshots to take.
        :param is_check_snapshot_tree: whether to verify 'virsh snapshot-list --tree' output.
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make (snapshot_take - 1) external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Strip the original suffix, if any, and append
                # ".<postfix_n><count>"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)
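            # At this point 'options' resembles, e.g. (illustrative values):
            #   "snap_1 snap1-desc --disk-only --atomic --no-metadata
            #    vda,snapshot=external,file=<tmp_dir>/<diskname>.snap1"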
            if is_check_snapshot_tree:
                options = options.replace("--no-metadata", "")
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

        def check_snapshot_tree():
            """
            Check whether the predefined snapshot names are equal to the
            snapshot names reported by virsh snapshot-list --tree
            """
            predefined_snapshot_name_list = []
            for count in range(1, snapshot_take):
                predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count))
            snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
            result_output = process.run(snapshot_list_cmd,
                                        ignore_status=True, shell=True).stdout_text
            virsh_snapshot_name_list = []
            for line in result_output.rsplit("\n"):
                strip_line = line.strip()
                if strip_line and "|" not in strip_line:
                    virsh_snapshot_name_list.append(strip_line)
            # Compare the two lists in order and value; all entries need to be the same.
            compare_list = [out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list)
                            if out_p not in out_v]
            if compare_list:
                test.fail("snapshot tree not correctly returned.")

        # If check_snapshot_tree is True, check snapshot tree output.
        if is_check_snapshot_tree:
            check_snapshot_tree()

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: source path of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
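        # The resulting chain of relative-path backing files is (illustrative):
        #   <root_dir>/<basename>  <-  b/b.img  <-  c/c.img  <-  d/d.img
        # with each image's backing_file stored as a relative path such as "../c/c.img".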
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}
    check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no")

    # Check whether qemu-img needs the -U suboption, since the image locking feature was added after qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)

    snapshot_external_disks = []
    cmd_session = None
    # Prepare a blank ceph config path to decide whether to delete the config file at the end of the test
    ceph_cfg = ''
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to contain the OS.
        # We will perform the blockcommit operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" %
                      blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # The backingStore element was introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # An inactive commit following an active commit will fail due to bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                blockcommit_options = "  --wait --verbose --top vda[1] --base vda[2] --keep-relative"
                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            # Do block commit with --wait if top_inactive
            for count in range(1, block_commit_index):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        # Run test case
        # Active commit is not supported on rbd-based disks due to bug 1200726
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target, '',
                                                        ignore_status=True, debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for the pivot after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
コード例 #16
0
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(),
                                       "additional_disk.xml")

    def config_ceph():
        """
        Write the ceph monitor host addresses into the config file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
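        # The written file then contains a single line such as (illustrative):
        #   mon_host = 10.0.0.1:6789,10.0.0.2:6789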

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
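            # Roughly equivalent to running (host value illustrative):
            #   virsh find-storage-pool-sources-as rbd 10.0.0.1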

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operations
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operations
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
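        # The greps above look for qemu drive fragments such as (illustrative):
        #   file=rbd:<pool>/<image>:id=<user>:auth_supported=cephx
        #   mon_host=<host>\:6789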

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :return: True if check successfully.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd volume (vol_name), unprotect them and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect snapshot first,otherwise it will fail to purge volume
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: list of external snapshot paths
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1":
            "s1 --disk-only --no-metadata",
            "s2":
            "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3":
            "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4":
            "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
            (snapshot4_file, snapshot4_disk_file),
            "s5":
            "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
            (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
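            # s4 and s5 write their disk snapshots to explicit --diskspec files,
            # so only s1-s3 contribute paths derived from the first disk source.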
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(
            secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Proceed only if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the first field is the secret UUID.
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")

    # Prepare a blank value to decide whether to delete the ceph config file at the end of the test.
    ceph_cfg = ""
    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # After libvirt 3.9.0, auth element can be put into source part.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel(
            "place auth in source is not supported in current libvirt version")

    # Start the vm and get all partitions in the vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of unsupported error messages used to skip such tests.
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
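            # Map each sanlock preparation command to the error message reported if it fails.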
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command.
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
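        # Build the rbd path (rbd:<pool/image>:mon_host=...[:id=...:key=...]) used by qemu-img and attach-disk.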
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # Pass a different json string depending on the auth config.
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # For hot plug, start the VM first and wait for the OS to boot.
        # Otherwise, stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume disks.
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            # Since libvirt 3.9.0, the auth element can be placed in the source element.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running.
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start." "Error: %s" % str(details))
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
コード例 #17
0
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False, is_create_image_file_in_vm=False):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix used in snapshot names
        :param snapshot_take: number of snapshots to take
        :param is_check_snapshot_tree: whether to verify virsh snapshot-list --tree output
        :param is_create_image_file_in_vm: whether to create an image file in the VM after each snapshot
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make (snapshot_take - 1) external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)
                clean_snap_file(disk_external)
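            # Keep snapshot metadata when the snapshot tree will be checked,
            # so that virsh snapshot-list --tree can list the snapshots.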
            if is_check_snapshot_tree:
                options = options.replace("--no-metadata", "")
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            if is_create_image_file_in_vm:
                create_file_cmd = "dd if=/dev/urandom of=/mnt/snapshot_%s.img bs=1M count=2" % count
                session.cmd_status_output(create_file_cmd)
                created_image_files_in_vm.append("snapshot_%s.img" % count)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

        def check_snapshot_tree():
            """
            Check whether the predefined snapshot names are equal to
            the snapshot names reported by virsh snapshot-list --tree.
            """
            predefined_snapshot_name_list = []
            for count in range(1, snapshot_take):
                predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count))
            snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
            result_output = process.run(snapshot_list_cmd,
                                        ignore_status=True, shell=True).stdout_text
            virsh_snapshot_name_list = []
            for line in result_output.rsplit("\n"):
                strip_line = line.strip()
                if strip_line and "|" not in strip_line:
                    virsh_snapshot_name_list.append(strip_line)
            # Compare the two lists; order and values must all match.
            compare_list = [out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list)
                            if out_p not in out_v]
            if compare_list:
                test.fail("snapshot tree not correctly returned.")

        # If check_snapshot_tree is True, check snapshot tree output.
        if is_check_snapshot_tree:
            check_snapshot_tree()

    def check_vm_disk_file(vm):
        """
        Check current vm disk source.

        :param vm: The vm to be checked
        """
        image_name1, image_format = params.get("image_name", "image"), params.get("image_format", "qcow2")
        image_dir = os.path.join(data_dir.get_data_dir(), image_name1)
        original_image_path = image_dir + "." + image_format
        logging.debug("Source file should be : %s", original_image_path)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        disk = vmxml.get_devices('disk')[0]
        logging.debug("Current disk info is : %s", disk)
        if disk.source.attrs['file'] != original_image_path:
            test.error("Please check current vm disk source")

    def clean_snap_file(snap_path):
        """
        Remove a pre-existing snapshot file, if any.

        :param snap_path: snap file path
        """
        if os.path.exists(snap_path):
            os.remove(snap_path)
            logging.debug("Cleaned snap file before creating :%s" % snap_path)

    def get_first_disk_source():
        """
        Get the disk source of the first disk device.

        :return: source path of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files(pre_set_root_dir=None):
        """
        Create backing chain files of relative path.
        :param pre_set_root_dir: preset root dir
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        if pre_set_root_dir is None:
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        if pre_set_root_dir:
            backing_file_dict["b"] = "%s" % first_disk_source
            backing_file_dict["c"] = "%s/b/b.img" % root_dir
            backing_file_dict["d"] = "%s/c/c.img" % root_dir
        disk_format = params.get("disk_format", "qcow2")
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s.img"
                   % (backing_file_path, "qcow2", value, disk_format, key))
            ret = process.run(cmd, shell=True)
            disk_format = "qcow2"
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # After active blockcommit, the source image should refer to the original one.
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    def create_reuse_external_snapshots(pre_set_root_dir=None):
        """
        Create reuse-external snapshots.

        :param pre_set_root_dir: preset root directory
        :return: root directory holding the snapshot files
        """
        if pre_set_root_dir is None:
            first_disk_source = get_first_disk_source()
            basename = os.path.basename(first_disk_source)
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        meta_options = " --reuse-external --disk-only --no-metadata"
        # Take three external snapshots reusing the pre-created b.img, c.img and d.img files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "b.img"
        backing_file_dict["c"] = "c.img"
        backing_file_dict["d"] = "d.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            external_snap_shot = "%s/%s" % (backing_file_path, value)
            snapshot_external_disks.append(external_snap_shot)
            options = "%s --diskspec %s,file=%s" % (meta_options, disk_target, external_snap_shot)
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=False,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result)
        logging.debug('reuse external snapshots:%s' % snapshot_external_disks)
        return root_dir

    def check_file_in_vm():
        """
        Check whether the created image files exist inside the VM.
        """
        for img_file in created_image_files_in_vm:
            status, output = session.cmd_status_output("ls -l /mnt/%s" % img_file)
            logging.debug(output)
            if status:
                test.fail("blockcommit from top to base failed when ls image file in VM: %s" % output)

    def do_blockcommit_pivot_repeatedly():
        """
        Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735
        """
        # Make an external snapshot, pivot, and delete the snapshot file repeatedly.
        tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
        block_target = 'vda'
        for count in range(0, 5):
            options = "%s " % tmp_snapshot_name
            options += "--disk-only --atomic"
            disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
            options += "  --diskspec  %s,snapshot=external,file=%s" % (block_target, disk_external)
            virsh.snapshot_create_as(vm_name, options,
                                     ignore_status=False, debug=True)
            virsh.blockcommit(vm_name, block_target,
                              " --active --pivot ", ignore_status=False, debug=True)
            virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
            libvirt.delete_local_disk('file', disk_external)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    cmd_timeout = params.get("cmd_timeout", "1")
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}
    check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no")
    bandwidth = params.get("blockcommit_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot", "no")
    restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit", "no")
    check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm", "no")
    pre_set_root_dir = None
    blk_source_folder = None
    convert_qcow2_image_to_raw = "yes" == params.get("convert_qcow2_image_to_raw", "no")
    repeatedly_do_blockcommit_pivot = "yes" == params.get("repeatedly_do_blockcommit_pivot", "no")
    from_top_without_active_option = "yes" == params.get("from_top_without_active_option", "no")
    top_to_middle_keep_overlay = "yes" == params.get("top_to_middle_keep_overlay", "no")
    block_disk_type_based_on_file_backing_file = "yes" == params.get("block_disk_type_based_on_file_backing_file", "no")
    block_disk_type_based_on_gluster_backing_file = "yes" == params.get("block_disk_type_based_on_gluster_backing_file", "no")

    # Check whether qemu-img needs the -U option; the image locking feature was added after qemu 2.10.
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_data_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)
    check_vm_disk_file(vm)
    snapshot_external_disks = []
    cmd_session = None
    # Prepare a blank value to decide whether to delete the ceph config file at the end of the test.
    ceph_cfg = ''
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'block' and reuse_external_snapshot:
            first_disk = vm.get_first_disk_devices()
            pre_set_root_dir = os.path.dirname(first_disk['source'])

        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")

                params.update(
                   {"disk_source_name": os.path.join(
                      pool_name, 'rbd_'+utils_misc.generate_random_string(4)+'.img')})
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(mon_host, *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common to clean image.')
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            if convert_qcow2_image_to_raw:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                blk_source_image_after_converted = "%s/converted_%s" % (blk_source_folder, blk_source_image)
                # Convert the image from qcow2 to raw
                convert_disk_cmd = ("qemu-img convert"
                                    " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
                process.run(convert_disk_cmd, ignore_status=False, shell=True)
                params.update({'disk_source_name': blk_source_image_after_converted,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        if repeatedly_do_blockcommit_pivot:
            do_blockcommit_pivot_repeatedly()

        # Create block type disk on file backing file
        if block_disk_type_based_on_file_backing_file or block_disk_type_based_on_gluster_backing_file:
            if not vm.is_alive():
                vm.start()
            first_src_file = get_first_disk_source()
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
            iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True)
            block_type_backstore = iscsi_target
            if block_disk_type_based_on_file_backing_file:
                first_src_file = get_first_disk_source()
            if block_disk_type_based_on_gluster_backing_file:
                first_src_file = "gluster://%s/%s/gluster.qcow2" % (params.get("gluster_server_ip"), params.get("vol_name"))
            backing_file_create_cmd = ("qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s"
                                       % ("qcow2", first_src_file, "qcow2", block_type_backstore))
            process.run(backing_file_create_cmd, ignore_status=False, shell=True)
            meta_options = " --reuse-external --disk-only --no-metadata"
            options = "%s --diskspec %s,file=%s,stype=block" % (meta_options, 'vda', block_type_backstore)
            virsh.snapshot_create_as(vm_name, options,
                                     ignore_status=False,
                                     debug=True)

        # The first disk is supposed to contain the OS;
        # we will perform the blockcommit operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []
        created_image_files_in_vm = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        if reuse_external_snapshot:
            make_relative_path_backing_files(pre_set_root_dir)
            blk_source_folder = create_reuse_external_snapshots(pre_set_root_dir)
        else:
            make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree, check_image_file_in_vm)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
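            # Redefine the domain with snap_top as its disk source; the overlays
            # above it (omit_list) are dropped from the expected backing chain.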
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            if libvirt_version.version_compare(6, 0, 0):
                bs_source = {'file': blk_source}
                bs_dict = {"type": params.get("disk_type", "file"),
                           "format": {'type': params.get("disk_format", "qcow2")}}
                new_bs = disk_xml.new_backingstore(**bs_dict)
                new_bs["source"] = disk_xml.backingstore.new_source(**bs_source)
                disk_xml.backingstore = new_bs
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # Check that the snapshot disk xml backingStore is as expected.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" %
                      blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
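            # snap_src_lst is ordered base-first, so reverse it to get the chain
            # from the active top image down to the base for check_chain_xml.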
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout %s" % cmd_timeout

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source
        if len(bandwidth):
            blockcommit_options += " --bandwidth %s" % bandwidth
        if bandwidth_byte:
            blockcommit_options += " --bytes"
        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            if reuse_external_snapshot:
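                # With reused external snapshots, commit from the second-to-last overlay.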
                index = len(snapshot_external_disks) - 2
                top_image = snapshot_external_disks[index]
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if from_top_without_active_option:
            blockcommit_options = blockcommit_options.replace("--active", "")

        if top_to_middle_keep_overlay:
            blockcommit_options = blockcommit_options.replace("--active", "")
            blockcommit_options = blockcommit_options.replace("--pivot", "")
            blockcommit_options += " --keep-overlay"

        if restart_vm_before_commit:
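            # Address --top and --base by chain index (e.g. vda[2]) and restart the guest before committing.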
            top = 2
            base = len(snapshot_external_disks)
            blockcommit_options = ("--top %s[%d] --base %s[%d] --verbose --wait --keep-relative"
                                   % (disk_target, top, disk_target, base))
            vm.destroy(gracefully=True)
            vm.start()

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # An inactive commit following an active commit will fail; see bug 1135339.
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_xml = ''
                disk_xmls = vmxml.get_devices(device_type="disk")
                for disk in disk_xmls:
                    if disk.get('device_tag') == 'disk':
                        disk_xml = disk
                        break

                top_index = 1
                try:
                    top_index = disk_xml.backingstore.index
                except AttributeError:
                    pass
                else:
                    top_index = int(top_index)

                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            for count in range(1, block_commit_index):
                # Do block commit with --wait if top_inactive
                if top_inactive:
                    blockcommit_options = ("  --wait --verbose --top vda[%d] "
                                           "--base vda[%d] --keep-relative"
                                           % (top_index, top_index+1))
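                    # Older libvirt renumbers the backing chain after every
                    # commit, so the next top stays at index 1; from libvirt
                    # 6.0.0 the index values stay stable, so step down the
                    # chain instead.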
                    if not libvirt_version.version_compare(6, 0, 0):
                        top_index = 1
                    else:
                        top_index += 1
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        if reuse_external_snapshot and not top_inactive:
            block_commit_index = len(snapshot_external_disks) - 1
            for index in range(block_commit_index):
                # Do block commit with --shallow --wait
                external_blockcommit_options = ("  --shallow --wait --verbose --top %s "
                                                % (snapshot_external_disks[index]))

                res = virsh.blockcommit(vm_name, blk_target,
                                        external_blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Do blockcommit with top active
            result = virsh.blockcommit(vm_name, blk_target,
                                       blockcommit_options, **virsh_dargs)
            # Check status_error
            libvirt.check_exit_status(result, status_error)
            return

        # Start one thread to check the bandwidth in output
        if bandwidth and bandwidth_byte:
            bandwidth += 'B'
            pool = ThreadPool(processes=1)
            pool.apply_async(check_bandwidth_thread, (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test))

        # Run test case
        # Active commit is not supported on rbd-based disks due to bug 1200726
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)
        # Check status_error
        libvirt.check_exit_status(result, status_error)

        # Skip check chain file as per test case description
        if restart_vm_before_commit:
            return

        if check_image_file_in_vm:
            check_file_in_vm()

        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target, '',
                                                        ignore_status=True, debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path or reuse_external_snapshot:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            if blk_source_folder:
                process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)
        if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
        # Recover images xattr if having some
        dirty_images = get_images_with_xattr(vm)
        if dirty_images:
            clean_images_with_xattr(dirty_images)
            test.fail("VM's image(s) having xattr left")
Code example #18
0
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.fail("Check disk inside the VM FAIL")

        # Check disk inside the VM; the expected result is False when status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
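
The attach/detach round trip checked above can be reduced to a short sketch. A hedged outline: disk_source and disk_target are placeholders, and only virsh wrappers of the kind used in these tests (attach/detach by target device, exit-status checks) are assumed.

from virttest import virsh
from virttest.utils_test import libvirt


def attach_detach_sketch(vm_name, disk_source, disk_target):
    # Attach an existing image file to the domain and expect success.
    result = virsh.attach_disk(vm_name, disk_source, disk_target, debug=True)
    libvirt.check_exit_status(result)
    # Detach it again by its target device name.
    result = virsh.detach_disk(vm_name, disk_target, debug=True)
    libvirt.check_exit_status(result)
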
Code example #19
0
def run(test, params, env):
    """
    Test command: virsh domrename.

    The command can rename a domain.
    1.Prepare test environment.
    2.Perform virsh rename operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    # Get specific parameter value
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_ref = params.get("domrename_vm_ref", "name")
    status_error = "yes" == params.get("status_error", "no")
    new_name = params.get("vm_new_name", "new")
    pre_vm_state = params.get("domrename_vm_state", "shutoff")
    domain_option = params.get("dom_opt", "")
    new_name_option = params.get("newname_opt", "")
    add_vm = "yes" == params.get("add_vm", "no")

    # Replace the variables
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if new_name == "vm2_name":
        vm2_name = vm_name[:-1] + "2"
        new_name = vm2_name

    # Build input params
    dom_param = ' '.join([domain_option, vm_ref])
    new_name_param = ' '.join([new_name_option, new_name])

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Clone additional vms if needed
    if add_vm:
        try:
            utils_path.find_command("virt-clone")
        except utils_path.CmdNotFoundError:
            if not utils_package.package_install(["virt-install"]):
                test.cancel("Failed to install virt-install on host")
        ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                    True, timeout=360)
        if ret_clone.exit_status:
            test.fail("Error occured when clone a second vm!")
        vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir, vm.address_cache)
        virsh.dom_list("--name --all", debug=True)

    # Create object instance for renamed domain
    new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)

    # Prepare vm state
    if pre_vm_state != "shutoff":
        vm.start()
        if pre_vm_state == "paused":
            vm.pause()
            logging.debug("Domain state is now: %s", vm.state())
        elif pre_vm_state == "managed_saved":
            vm.managedsave()
        elif pre_vm_state == "with_snapshot":
            virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True)
            vm.destroy(gracefully=False)

    try:
        result = virsh.domrename(dom_param, new_name_param, ignore_status=True, debug=True)

        # Raise unexpected pass or fail
        libvirt.check_exit_status(result, status_error)

        # Return expected failure for negative tests
        if status_error:
            logging.debug("Expected failure: %s", result.stderr)
            return

        # Checkpoints after domrename succeeds
        else:
            list_ret = virsh.dom_list("--name --all", debug=True).stdout
            domname_ret = virsh.domname(domuuid, debug=True).stdout.strip()
            if new_name not in list_ret or vm_name in list_ret:
                test.fail("New name is not reflected in virsh list output")
            if domname_ret != new_name:
                test.fail("virsh domname with uuid does not return the new domain name")

            # Try to start vm with the new name
            new_vm.start()

    finally:
        # Remove additional vms
        if add_vm and vm2.exists() and result.exit_status:
            virsh.remove_domain(vm2_name, "--remove-all-storage")

        # Undefine newly renamed domain
        if new_vm.exists():
            if new_vm.is_alive():
                new_vm.destroy(gracefully=False)
            new_vm.undefine()

        # Recover domain state
        if pre_vm_state != "shutoff":
            if pre_vm_state == "with_snapshot":
                libvirt.clean_up_snapshots(vm_name)
            else:
                if pre_vm_state == "managed_saved":
                    vm.start()
                vm.destroy(gracefully=False)

        # Restore VM
        vmxml_backup.sync()
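
A minimal sketch of the rename flow tested above, under the assumption that the domain is already defined and shut off (virsh domrename only works on inactive domains); new_name is a placeholder.

from virttest import virsh


def rename_sketch(vm_name, new_name):
    # domrename requires an inactive domain.
    virsh.destroy(vm_name, ignore_status=True)
    result = virsh.domrename(vm_name, new_name, debug=True)
    if result.exit_status:
        raise RuntimeError("rename failed: %s" % result.stderr)
    # The UUID is stable across the rename, so domname on the UUID
    # must now return the new name.
    uuid = virsh.domuuid(new_name, debug=True).stdout.strip()
    assert virsh.domname(uuid, debug=True).stdout.strip() == new_name
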
Code example #20
0
File: startup_policy.py  Project: nasastry/tp-libvirt
def run(test, params, env):
    """
    Test startupPolicy for CD-ROM/floppy/Volume disks.

    Steps:
    1. Prepare disk media image.
    2. Setup startupPolicy for a disk.
    3. Start the domain.
    4. Save the domain.
    5. Remove the disk source file and restore the domain.
    6. Update startupPolicy for a disk.
    7. Destroy the domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    startup_policy = params.get("policy")

    def create_iscsi_pool():
        """
        Set up an iSCSI target and create one iSCSI pool.
        """
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user="",
                                                               chap_passwd="",
                                                               portal_ip=disk_src_host)
        # Define an iSCSI pool xml to create it
        pool_src_xml = pool_xml.SourceXML()
        pool_src_xml.host_name = pool_src_host
        pool_src_xml.device_path = iscsi_target
        poolxml = pool_xml.PoolXML(pool_type=pool_type)
        poolxml.name = pool_name
        poolxml.set_source(pool_src_xml)
        poolxml.target_path = "/dev/disk/by-path"

        # Create iSCSI pool.
        virsh.pool_destroy(pool_name, **virsh_dargs)
        cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def create_volume(pvt, created_vol_name=None):
        """
        Create iSCSI volume.

        :param pvt: PoolVolumeTest object
        :param created_vol_name: Created volume name
        """
        try:
            if pool_type == "iscsi":
                create_iscsi_pool()
            else:
                pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
                pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format,
                            capacity=capacity, allocation=None,
                            pool_name=pool_name)
        except Exception as pool_exception:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **virsh_dargs)
            test.error("Error occurred when prepare" +
                       "pool xml with message %s:\n" % str(pool_exception))

        def get_vol():
            """Get the volume info"""
            # Refresh the pool
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(pool_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            vol_list = []
            vol_list = re.findall(r"(\S+)\ +(\S+)",
                                  str(cmd_result.stdout.strip()))
            try:
                return vol_list[1]
            except IndexError:
                return None
        # Wait for a while so that we can get the volume info
        vol_info = utils_misc.wait_for(get_vol, 10)
        if vol_info:
            tmp_vol_name, tmp_vol_path = vol_info
        else:
            test.error("Failed to get volume info")
        process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'),
                    shell=True)
        return vol_info

    def check_disk_source(vm_name, target_dev, expect_value):
        """
        Check the disk source: file and startupPolicy.

        :param vm_name: Domain name
        :param target_dev: Disk's target device
        :param expect_value: Expect value of source file and source startupPolicy
        """
        logging.debug("Expect source file is '%s'", expect_value[0])
        logging.debug("Expect source startupPolicy is '%s'", expect_value[1])
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        source_value = []
        try:
            disk_source = disks[target_dev].find('source')
            source_value.append(disk_source.get('file'))
            source_value.append(disk_source.get('startupPolicy'))
        except KeyError:
            test.error("No %s in domain %s" % (target_dev, vm_name))
        logging.debug("Actual source file is '%s'", source_value[0])
        logging.debug("Actual source startupPolicy is '%s'", source_value[1])
        if source_value == expect_value:
            logging.debug("Domain disk XML check pass")
        else:
            test.error("Domain disk XML check fail")

    def create_disk_xml():
        """
        Create a disk xml file for attaching to a domain.
        """
        if disk_type == "file":
            process.run("qemu-img create %s %s" % (media_file, image_size), shell=True)
        disk_params = {'device_type': device_type,
                       'type_name': disk_type,
                       'target_dev': target_dev,
                       'target_bus': target_bus}
        if disk_type == "file":
            disk_params_src = {'source_protocol': "file",
                               'source_file': media_file,
                               'source_startupPolicy': startup_policy}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': pool_name,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_startupPolicy': startup_policy}
            if pool_type == "iscsi":
                disk_params_src.update({'source_mode': "host"})
        disk_params.update(disk_params_src)
        disk_xml = libvirt.create_disk_xml(disk_params)
        shutil.copyfile(disk_xml, disk_xml_file)
        return disk_xml

    def check_in_vm(old_parts):
        """
        Check mount/read/write disk in VM.

        :param old_parts: pre-operated partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            logging.debug("new parted:%s", new_parts)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            added_part = added_parts[0]
            if not added_part:
                logging.error("Can't see added partition in VM")
                return False
            if 'sr' not in added_part and 'fd' not in added_part:
                cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                       "mkdir -p test && mount /dev/{0} test && echo"
                       " teststring > test/testfile && umount test"
                       .format(added_part))
                status, output = session.cmd_status_output(cmd)
                logging.info("Check disk operation in VM:\n%s", output)
                if status != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str):
        """
        Check updated policy after executing virsh update-device.

        :param origin_policy: the inherit startup policy value.
        :param policy_list: updated policy list.
        :param xml_policy_file: xml file for startupPolicy.
        :param device_type: device type, e.g. cdrom or disk.
        :param flag_str: it can be --config, --live or --persistent.
        """
        for policy in policy_list:
            xmltreefile = XMLTreeFile(xml_policy_file)
            try:
                policy_item = xmltreefile.find('/source')
                policy_item.set('startupPolicy', policy)
            except AttributeError as elem_attr:
                test.error("Fail to find startupPolicy attribute.%s", str(elem_attr))
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True)
            if all([device_type == "disk", policy == "requisite"]):
                libvirt.check_exit_status(ret, True)
                return
            else:
                libvirt.check_exit_status(ret)

            def check_policy_value(active_policy, inactive_policy):
                """
                Check policy value in dumpxml with active or inactive option

                :param active_policy: active policy attribute value
                :param inactive_policy: inactive policy attribute value
                """
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list)-1]
                if not active_policy == disk.source.attrs["startupPolicy"]:
                    test.error("Actual policy:%s in active state is not equal to expected:%s"
                               % (active_policy, disk.source.attrs["startupPolicy"]))
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list)-1]
                if not inactive_policy == disk.source.attrs["startupPolicy"]:
                    test.error("Actual policy:%s in inactive state is not equal to expected: %s"
                               % (inactive_policy, disk.source.attrs["startupPolicy"]))
            if flag_str == "--live":
                check_policy_value(policy, origin_policy)
            elif flag_str == "--config":
                check_policy_value(origin_policy, policy)
            elif flag_str == "--persistent":
                check_policy_value(policy, policy)

    def check_source_update(xml_policy_file):
        """
        Update source and policy at the same time, then check those changes.

        :param xml_policy_file: VM xml policy file
        """
        xmltreefile = XMLTreeFile(xml_policy_file)
        policy_item = xmltreefile.find('/source')

        def configure_startup_policy(update=False, policy='optional'):
            """
            Configure startupPolicy attribute value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            if update:
                del policy_item.attrib["startupPolicy"]
            else:
                policy_item.set("startupPolicy", policy)
            flag_option = "--live"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        # Update source and startUpPolicy attribute value.
        def update_source_policy(update=True, policy='optional'):
            """
            Update startupPolicy source value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            source_file = policy_item.get('file')
            if update:
                new_source_file = source_file+".empty"
            else:
                new_source_file = source_file+".new"
            shutil.copyfile(source_file, new_source_file)
            policy_item.set("file", new_source_file)
            policy_item.set("startupPolicy", policy)
            flag_option = "--persistent"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        function_list = [configure_startup_policy, update_source_policy,
                         configure_startup_policy, update_source_policy]
        function_parameter = [False, False, True, True]
        # Loop all above scenarios to update device.
        for index in list(range(len(function_list))):
            try:
                func = function_list[index]
                para = function_parameter[index]
                flag_option, update_error = func(para)
                ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True)
                libvirt.check_exit_status(ret, expect_error=update_error)
            except AttributeError as elem_attr:
                test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr))
            except Exception as update_device_exception:
                test.error("Fail to update device:%s" % str(update_device_exception))
            finally:
                source_file = policy_item.get('file')
                new_source_file = source_file+".new"
                if os.path.exists(new_source_file):
                    os.remove(new_source_file)

    def rename_file(source_file, target_file, revert=False):
        """
        Rename a file or revert it.

        :param source_file: The source file name.
        :param target_file: The target file name.
        :param revert: It can be True or False.
        """
        try:
            if not revert:
                os.rename(source_file, target_file)
                logging.debug("Rename %s to %s", source_file, target_file)
            else:
                os.rename(target_file, source_file)
                logging.debug("Rename %s to %s", target_file, source_file)
        except OSError as err:
            test.fail("Rename image failed: %s" % str(err))

    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Get start/restore configuration parameters.
    start_error = "yes" == params.get("start_error", "no")
    restore_error = "yes" == params.get("restore_error", "no")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    attach_option = params.get("attach_option")

    # Create disk xml and attach it.
    device_type = params.get("device_type")
    disk_type = params.get("disk_type", "network")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    target_dev = params.get("target_dev")
    target_bus = params.get("disk_target_bus", "virtio")
    image_size = params.get("image_size", "1.44M")
    emulated_image = "emulated-iscsi"

    # Storage pool and volume related parameters.
    pool_name = params.get("pool_name", "iscsi_pool")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    vol_name = params.get("volume_name")
    capacity = params.get("volume_size", "1048576")
    vol_format = params.get("volume_format")

    # Source file parameters.
    media_name = params.get("media_name")
    media_file = os.path.join(data_dir.get_tmp_dir(), media_name)
    media_file_new = media_file + ".new"
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    snapshot_name = "s1"

    # Policy related parameters.
    disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml")
    disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(), "attach_policy_disk.xml")
    update_policy = "yes" == params.get("update_policy", "no")
    policy_only = "yes" == params.get("policy_only", "no")
    update_policy_list = params.get("update_policy_list").split()
    expect_value = [None, startup_policy]

    try:
        if disk_type == "volume":
            pvt = libvirt.PoolVolumeTest(test, params)
            vol_name, vol_path = create_volume(pvt, vol_name)
            vol_path_new = vol_path + ".new"

        # Create disk xml.
        create_disk_xml()
        if vm.is_alive():
            vm.destroy()
        try:
            # Backup disk xml file for policy update if update_policy=True.
            if update_policy:
                shutil.copyfile(disk_xml_file, disk_xml_policy_file)
            result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file,
                                         flagstr="--config", **virsh_dargs)
            # The startupPolicy attribute is not valid for iSCSI pool volumes.
            # Moreover, setting 'requisite' on a disk device is allowed only for cdrom or floppy.
            if pool_type == "iscsi" or all([device_type == "disk", startup_policy == "requisite"]):
                libvirt.check_exit_status(result, expect_error=True)
                return
            else:
                libvirt.check_exit_status(result, expect_error=False)
        except Exception as attach_device_exception:
            logging.debug("Attach device throws exception:%s", str(attach_device_exception))
            os.remove(media_file)
            test.error("Attach %s fail" % device_type)
        # Check update policy operations.
        if disk_type == "file" and update_policy:
            vm.start()
            if policy_only:
                check_policy_update(startup_policy, update_policy_list,
                                    disk_xml_policy_file, device_type, attach_option)
            else:
                check_source_update(disk_xml_policy_file)
        elif disk_type == "file":
            # Step 1. Start domain and destroy it normally
            vm.start()
            vm.destroy()

            # Step 2. Remove the source_file then start the domain
            rename_file(media_file, media_file_new)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # For libvirt version >= 2.0.0, the behavior was updated and the startupPolicy
            # attribute cannot exist alone without the source protocol.
            if not start_error and not libvirt_version.version_compare(2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 3. Move back the source file and start the domain(if needed).
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source file
            # and restore it back
            vm.save_to_file(save_file)
            rename_file(media_file, media_file_new)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)
            if not restore_error and not libvirt_version.version_compare(2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 5. Move back the source file and restore the domain(if needed)
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                result = virsh.restore(save_file, **virsh_dargs)
                libvirt.check_exit_status(result, expect_error=False)
        elif disk_type == "volume":
            # Step 1. Start the domain and check the attached disk inside the VM.
            vm.start()
            if not check_in_vm(old_parts):
                test.fail("Check disk partitions in VM failed")

            # Step 2. Move the volume to another place, refresh the pool, then reboot the guest.
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            vm.destroy()
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # Step 3. Move back the source file and start the domain.
            rename_file(vol_path, vol_path_new, revert=True)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, remove the source file, then restore the domain.
            vm.save_to_file(save_file)
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)

            # Step 5. Create a snapshot, move the source to another place, then revert the snapshot.
            if device_type == "disk":
                rename_file(vol_path, vol_path_new, revert=True)
                cmd_result = virsh.pool_refresh(pool_name)
                libvirt.check_exit_status(cmd_result)
                if restore_error:
                    result = virsh.restore(save_file, **virsh_dargs)
                    libvirt.check_exit_status(result)
                ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                rename_file(vol_path, vol_path_new)
                ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs)
                # Clean up snapshot.
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()

        if disk_type == "volume":
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **virsh_dargs)
        if os.path.exists(save_file):
            os.remove(save_file)
        if os.path.exists(disk_xml_file):
            os.remove(disk_xml_file)
        if os.path.exists(media_file):
            os.remove(media_file)
        if os.path.exists(disk_xml_policy_file):
            os.remove(disk_xml_policy_file)
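
For reference, a hedged sketch of how a disk XML carrying a startupPolicy attribute is built with the libvirt.create_disk_xml helper used above; the device/target values and the image path are placeholders, not taken from the test.

from virttest.utils_test import libvirt

# A file-backed cdrom whose <source> carries startupPolicy='optional'.
disk_params = {'device_type': 'cdrom',
               'type_name': 'file',
               'target_dev': 'hdc',
               'target_bus': 'ide',
               'source_protocol': 'file',
               'source_file': '/var/lib/libvirt/images/media.iso',  # placeholder path
               'source_startupPolicy': 'optional'}
disk_xml_path = libvirt.create_disk_xml(disk_params)
print(open(disk_xml_path).read())
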
Code example #21
0
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n, snapshot_take):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix option
        :param snapshot_take: number of snapshots to take.
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device
        :return: first disk of first device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption, since the image locking feature was added after qemu 2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n, snapshot_take)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" %
                      blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # An inactive commit following an active commit will fail with bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                blockcommit_options = "  --wait --verbose --top vda[1] --base vda[2] --keep-relative"
                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            # Do block commit with --wait if top_inactive
            for count in range(1, block_commit_index):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        # Run test case
        # Active commit is not supported on rbd-based disks due to bug 1200726
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target, '',
                                                        ignore_status=True, debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for pivot after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if vm_state != "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
コード例 #22
0
ファイル: virsh_domrename.py プロジェクト: jgao12345/learngit
def run(test, params, env):
    """
    Test command: virsh rename.

    The command can rename a domain.
    1.Prepare test environment.
    2.Perform virsh rename operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    # Get specific parameter value
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_ref = params.get("domrename_vm_ref", "name")
    status_error = "yes" == params.get("status_error", "no")
    new_name = params.get("vm_new_name", "new")
    pre_vm_state = params.get("domrename_vm_state", "shutoff")
    domain_option = params.get("dom_opt", "")
    new_name_option = params.get("newname_opt", "")
    add_vm = "yes" == params.get("add_vm", "no")

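    # The command exercised here has the form (illustrative):
    #   virsh domrename <domain-name|uuid> <new-name>
    # Note that libvirt generally only allows renaming inactive domains.
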
    # Replace the variables
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if new_name == "vm2_name":
        vm2_name = ("%s" % vm_name[:-1]) + "2"
        new_name = vm2_name

    # Build input params
    dom_param = ' '.join([domain_option, vm_ref])
    new_name_param = ' '.join([new_name_option, new_name])

    if vm.is_alive():
        vm.destroy()

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Clone additional vms if needed
    if add_vm:
        try:
            utils_path.find_command("virt-clone")
        except utils_path.CmdNotFoundError:
            if not utils_package.package_install(["virt-install"]):
                test.cancel("Failed to install virt-install on host")
        ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                    vm2_name,
                                                    True,
                                                    timeout=360)
        if ret_clone.exit_status:
            test.fail("Error occurred when cloning the second vm!")
        vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir, vm.address_cache)
        virsh.dom_list("--name --all", debug=True)

    # Create object instance for renamed domain
    new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)

    # Prepare vm state
    if pre_vm_state not in ["shutoff", "autostart"]:
        vm.start()
        if pre_vm_state == "paused":
            vm.pause()
            logging.debug("Domain state is now: %s", vm.state())
        elif pre_vm_state == "managed_saved":
            vm.managedsave()
        elif pre_vm_state == "with_snapshot":
            virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True)
            vm.destroy(gracefully=False)

    try:
        if pre_vm_state == "autostart":
            virsh.autostart(dom_param, "", debug=True)
            virsh.dom_list("--all --autostart", debug=True)
            logging.debug("files under '/etc/libvirt/qemu/autostart/' are %s",
                          os.listdir('/etc/libvirt/qemu/autostart/'))

        result = virsh.domrename(dom_param,
                                 new_name_param,
                                 ignore_status=True,
                                 debug=True)

        # Raise unexpected pass or fail
        libvirt.check_exit_status(result, status_error)

        # Return expected failure for negative tests
        if status_error:
            logging.debug("Expected failure: %s", result.stderr)
            return

        # Checkpoints after domrename succeed
        else:
            list_ret = virsh.dom_list("--name --all",
                                      debug=True).stdout.strip().splitlines()
            domname_ret = virsh.domname(domuuid, debug=True).stdout.strip()
            if new_name not in list_ret or vm_name in list_ret:
                test.fail("New name does not take effect in virsh list")
            if domname_ret != new_name:
                test.fail(
                    "New domain name does not take effect in virsh domname uuid")

            if pre_vm_state != "autostart":
                # Try to start vm with the new name
                new_vm.start()
            else:
                utils_libvirtd.libvirtd_restart()
                list_autostart = virsh.dom_list("--autostart",
                                                debug=True).stdout
                logging.debug(
                    "files under '/etc/libvirt/qemu/autostart/' are %s",
                    os.listdir('/etc/libvirt/qemu/autostart/'))
                process.run("file /etc/libvirt/qemu/autostart/%s.xml" %
                            vm_name,
                            verbose=True)
                if new_name not in list_autostart:
                    test.fail(
                        "Domain isn't autostarted after restarting libvirtd,"
                        " or is no longer marked as autostart.")

    finally:
        # Remove additional vms
        if add_vm and vm2.exists() and result.exit_status:
            virsh.remove_domain(vm2_name, "--remove-all-storage")

        # Undefine newly renamed domain
        if new_vm.exists():
            if new_vm.is_alive():
                new_vm.destroy(gracefully=False)
            new_vm.undefine()

        # Recover domain state
        if pre_vm_state != "shutoff":
            if pre_vm_state == "with_snapshot":
                libvirt.clean_up_snapshots(vm_name)
            else:
                if pre_vm_state == "managed_saved":
                    vm.start()
                vm.destroy(gracefully=False)

        # Restore VM
        vmxml_backup.sync()
コード例 #23
0
ファイル: vcpu_misc.py プロジェクト: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

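        # The resulting <cpu> element looks roughly like (illustrative):
        #   <cpu mode='host-model'>
        #     <feature policy='require' name='...'/>
        #   </cpu>
        # and, when vcpu_max/with_topology are set, the vcpu count gets a
        # matching <topology sockets='1' cores='N' threads='1'/> element.
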
        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()

        if customize_cpu_features:
            for idx in range(len(cpu_xml.get_feature_list()) - 1, -1, -1):
                cpu_xml.remove_feature(idx)
            domcapa_xml = domcapability_xml.DomCapabilityXML()
            features = domcapa_xml.get_additional_feature_list(
                'host-model', ignore_features=None)
            for feature in features:
                for feature_name, feature_policy in feature.items():
                    # For host-passthrough mode, adding "invtsc" requires
                    # more settings, so it will be ignored.
                    if feature_name != "invtsc":
                        cpu_xml.add_feature(feature_name, feature_policy)

        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def check_feature_list(vm, original_dict):
        """
        Compare new cpu feature list and original cpu

        :param vm: VM object
        :param original_dict: CPU feature dict, e.g. {"name1": "policy1", "name2": "policy2"}
        """
        new_cpu_xml = vm_xml.VMXML.new_from_dumpxml(vm.name).cpu
        new_feature_dict = new_cpu_xml.get_dict_type_feature()
        if new_feature_dict != original_dict:
            test.fail('CPU feature lists are different, original is: %s,'
                      ' new is: %s' % (original_dict, new_feature_dict))

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")
    customize_cpu_features = "yes" == params.get("customize_cpu_features",
                                                 "no")
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        cpu_xml = vmxml.cpu
        feature_dict = cpu_xml.get_dict_type_feature()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)

            # Add case: Check cpu xml after domain Managedsaved and restored
            if test_operations:
                for item in test_operations.split(','):
                    if item == "managedsave_restore":
                        # (1)Domain Manage saved
                        virsh.managedsave(vm_name,
                                          ignore_status=False,
                                          debug=True)
                        check_feature_list(vm, feature_dict)
                        # (2)Domain Restore
                        virsh.restore(managed_save_file,
                                      ignore_status=False,
                                      debug=True)
                        # (3)Check mode and feature list here
                        libvirt.check_dumpxml(vm, cpu_mode)
                        check_feature_list(vm, feature_dict)

    finally:
        logging.debug("Recover test environment")
        if os.path.exists(managed_save_file):
            virsh.managedsave_remove(vm_name, debug=True)
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
コード例 #24
0
                ret = virsh.detach_device(vm_name, disks_xml[i].xml,
                                          flagstr=attach_option)
                os.remove(disks_xml[i].xml)
                libvirt.check_exit_status(ret)

            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices,
                                           device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        virsh.undefine(vm_name)
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)

        # Restore qemu_config file.
        qemu_config.restore()
        utils_libvirtd.libvirtd_restart()
コード例 #25
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list."
                      % snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    cpu_vendor_id = params.get("vendor_id")
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")
    expected_qemuline = params.get("expected_qemuline")
    cmd_in_guest = params.get("cmd_in_guest")
    test_operations = params.get("test_operations")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if cpu_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]
            if not re.search(host_vendor, cpu_vendor_id, re.IGNORECASE):
                test.cancel("Not supported cpu vendor_id {} on this host."
                            .format(cpu_vendor_id))

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml

        vmxml.sync()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result)

        if expected_str_after_startup:
            libvirt.check_dumpxml(vm, expected_str_after_startup)

        if expected_qemuline:
            libvirt.check_qemu_cmd_line(expected_qemuline)

        if cmd_in_guest:
            vm_session = vm.wait_for_login()
            if vm_session.cmd_status(cmd_in_guest):
                vm_session.close()
                test.fail("Failed to run '%s' in vm." % cmd_in_guest)
            vm_session.close()

    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
コード例 #26
0
def run(test, params, env):
    """
    Test startupPolicy for CD-ROM/floppy/Volume disks.

    Steps:
    1. Prepare disk media image.
    2. Setup startupPolicy for a disk.
    3. Start the domain.
    4. Save the domain.
    5. Remove the disk source file and restore the domain.
    6. Update startupPolicy for a disk.
    7. Destroy the domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    startup_policy = params.get("policy")
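    # An illustrative disk definition with startupPolicy, roughly what
    # create_disk_xml() below generates for the file-based case:
    #   <disk type='file' device='cdrom'>
    #     <source file='/tmp/<media_name>' startupPolicy='optional'/>
    #     <target dev='...' bus='...'/>
    #   </disk>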

    def create_iscsi_pool():
        """
        Set up an iSCSI target and create one iSCSI pool.
        """
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user="",
            chap_passwd="",
            portal_ip=disk_src_host)
        # Define an iSCSI pool xml to create it
        pool_src_xml = pool_xml.SourceXML()
        pool_src_xml.host_name = pool_src_host
        pool_src_xml.device_path = iscsi_target
        poolxml = pool_xml.PoolXML(pool_type=pool_type)
        poolxml.name = pool_name
        poolxml.set_source(pool_src_xml)
        poolxml.target_path = "/dev/disk/by-path"

        # Create iSCSI pool.
        virsh.pool_destroy(pool_name, **virsh_dargs)
        cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def create_volume(pvt, created_vol_name=None):
        """
        Create iSCSI volume.

        :param pvt: PoolVolumeTest object
        :param created_vol_name: Created volume name
        """
        try:
            if pool_type == "iscsi":
                create_iscsi_pool()
            else:
                pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
                pvt.pre_vol(vol_name=created_vol_name,
                            vol_format=vol_format,
                            capacity=capacity,
                            allocation=None,
                            pool_name=pool_name)
        except Exception as pool_exception:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **virsh_dargs)
            test.error("Error occurred when preparing pool xml "
                       "with message: %s\n" % str(pool_exception))

        def get_vol():
            """Get the volume info"""
            # Refresh the pool
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(pool_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            vol_list = []
            vol_list = re.findall(r"(\S+)\ +(\S+)",
                                  str(cmd_result.stdout.strip()))
            try:
                return vol_list[1]
            except IndexError:
                return None

        # Wait for a while so that we can get the volume info
        vol_info = utils_misc.wait_for(get_vol, 10)
        if vol_info:
            tmp_vol_name, tmp_vol_path = vol_info
        else:
            test.error("Failed to get volume info")
        process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'),
                    shell=True)
        return vol_info

    def check_disk_source(vm_name, target_dev, expect_value):
        """
        Check the disk source: file and startupPolicy.

        :param vm_name: Domain name
        :param target_dev: Disk's target device
        :param expect_value: Expect value of source file and source startupPolicy
        """
        logging.debug("Expect source file is '%s'", expect_value[0])
        logging.debug("Expect source startupPolicy is '%s'", expect_value[1])
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        source_value = []
        try:
            disk_source = disks[target_dev].find('source')
            source_value.append(disk_source.get('file'))
            source_value.append(disk_source.get('startupPolicy'))
        except KeyError:
            test.error("No %s in domain %s" % (target_dev, vm_name))
        logging.debug("Actual source file is '%s'", source_value[0])
        logging.debug("Actual source startupPolicy is '%s'", source_value[1])
        if source_value == expect_value:
            logging.debug("Domain disk XML check pass")
        else:
            test.error("Domain disk XML check failed")

    def create_disk_xml():
        """
        Create a disk xml file for attaching to a domain.
        """
        if disk_type == "file":
            process.run("qemu-img create %s %s" % (media_file, image_size),
                        shell=True)
        disk_params = {
            'device_type': device_type,
            'type_name': disk_type,
            'target_dev': target_dev,
            'target_bus': target_bus
        }
        if disk_type == "file":
            disk_params_src = {
                'source_protocol': "file",
                'source_file': media_file,
                'source_startupPolicy': startup_policy
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': pool_name,
                'source_volume': vol_name,
                'driver_type': 'qcow2',
                'source_startupPolicy': startup_policy
            }
            if pool_type == "iscsi":
                disk_params_src.update({'source_mode': "host"})
        disk_params.update(disk_params_src)
        disk_xml = libvirt.create_disk_xml(disk_params)
        shutil.copyfile(disk_xml, disk_xml_file)
        return disk_xml

    def check_in_vm(old_parts):
        """
        Check mount/read/write disk in VM.

        :param old_parts: pre-operated partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            logging.debug("new parts: %s", new_parts)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            added_part = added_parts[0]
            if not added_part:
                logging.error("Can't see added partition in VM")
                return False
            if 'sr' not in added_part and 'fd' not in added_part:
                cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                       "mkdir -p test && mount /dev/{0} test && echo"
                       " teststring > test/testfile && umount test".format(
                           added_part))
                status, output = session.cmd_status_output(cmd)
                logging.info("Check disk operation in VM:\n%s", output)
                if status != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_policy_update(origin_policy, policy_list, xml_policy_file,
                            device_type, flag_str):
        """
        Check updated policy after executing virsh update-device.

        :param origin_policy: the original startupPolicy value.
        :param policy_list: updated policy list.
        :param xml_policy_file: xml file for startupPolicy.
        :param device_type: device type, e.g. cdrom or disk.
        :param flag_str: it can be --config, --live or --persistent.
        """
        for policy in policy_list:
            xmltreefile = XMLTreeFile(xml_policy_file)
            try:
                policy_item = xmltreefile.find('/source')
                policy_item.set('startupPolicy', policy)
            except AttributeError as elem_attr:
                test.error("Fail to find startupPolicy attribute: %s" %
                           str(elem_attr))
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            ret = virsh.update_device(vm_name,
                                      xml_policy_file,
                                      flagstr=flag_str,
                                      debug=True)
            if all([device_type == "disk", policy == "requisite"]):
                libvirt.check_exit_status(ret, True)
                return
            else:
                libvirt.check_exit_status(ret)

            def check_policy_value(active_policy, inactive_policy):
                """
                Check policy value in dumpxml with active or inactive option

                :param active_policy: active policy attribute value
                :param inactive_policy: inactive policy attribute value
                """
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list) - 1]
                if not active_policy == disk.source.attrs["startupPolicy"]:
                    test.error(
                        "Actual policy:%s in active state is not equal to expected:%s"
                        % (active_policy, disk.source.attrs["startupPolicy"]))
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list) - 1]
                if not inactive_policy == disk.source.attrs["startupPolicy"]:
                    test.error(
                        "Actual policy:%s in inactive state is not equal to expected: %s"
                        %
                        (inactive_policy, disk.source.attrs["startupPolicy"]))

            if flag_str == "--live":
                check_policy_value(policy, origin_policy)
            elif flag_str == "--config":
                check_policy_value(origin_policy, policy)
            elif flag_str == "--persistent":
                check_policy_value(policy, policy)

    def check_source_update(xml_policy_file):
        """
        Update source and policy at the same time, then check those changes.

        :param xml_policy_file: VM xml policy file
        """
        xmltreefile = XMLTreeFile(xml_policy_file)
        policy_item = xmltreefile.find('/source')

        def configure_startup_policy(update=False, policy='optional'):
            """
            Configure startupPolicy attribute value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            if update:
                del policy_item.attrib["startupPolicy"]
            else:
                policy_item.set("startupPolicy", policy)
            flag_option = "--live"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        # Update source and startupPolicy attribute values.
        def update_source_policy(update=True, policy='optional'):
            """
            Update startupPolicy source value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            source_file = policy_item.get('file')
            if update:
                new_source_file = source_file + ".empty"
            else:
                new_source_file = source_file + ".new"
            shutil.copyfile(source_file, new_source_file)
            policy_item.set("file", new_source_file)
            policy_item.set("startupPolicy", policy)
            flag_option = "--persistent"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        function_list = [
            configure_startup_policy, update_source_policy,
            configure_startup_policy, update_source_policy
        ]
        function_parameter = [False, False, True, True]
        # Loop all above scenarios to update device.
        for index in list(range(len(function_list))):
            try:
                func = function_list[index]
                para = function_parameter[index]
                flag_option, update_error = func(para)
                ret = virsh.update_device(vm_name,
                                          xml_policy_file,
                                          flagstr=flag_option,
                                          debug=True)
                libvirt.check_exit_status(ret, expect_error=update_error)
            except AttributeError as elem_attr:
                test.error("Fail to remove startupPolicy attribute:%s" %
                           str(elem_attr))
            except Exception as update_device_exception:
                test.error("Fail to update device:%s" %
                           str(update_device_exception))
            finally:
                source_file = policy_item.get('file')
                new_source_file = source_file + ".new"
                if os.path.exists(new_source_file):
                    os.remove(new_source_file)

    def rename_file(source_file, target_file, revert=False):
        """
        Rename a file or revert it.

        :param source_file: The source file name.
        :param target_file: The target file name.
        :param revert: if True, rename target_file back to source_file.
        """
        try:
            if not revert:
                os.rename(source_file, target_file)
                logging.debug("Rename %s to %s", source_file, target_file)
            else:
                os.rename(target_file, source_file)
                logging.debug("Rename %s to %s", target_file, source_file)
        except OSError as err:
            test.fail("Rename image failed: %s" % str(err))

    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Get start/restore configuration parameters.
    start_error = "yes" == params.get("start_error", "no")
    restore_error = "yes" == params.get("restore_error", "no")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    attach_option = params.get("attach_option")

    # Create disk xml and attach it.
    device_type = params.get("device_type")
    disk_type = params.get("disk_type", "network")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    target_dev = params.get("target_dev")
    target_bus = params.get("disk_target_bus", "virtio")
    image_size = params.get("image_size", "1.44M")
    emulated_image = "emulated-iscsi"

    # Storage pool and volume related parameters.
    pool_name = params.get("pool_name", "iscsi_pool")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    vol_name = params.get("volume_name")
    capacity = params.get("volume_size", "1048576")
    vol_format = params.get("volume_format")

    # Source file parameters.
    media_name = params.get("media_name")
    media_file = os.path.join(data_dir.get_tmp_dir(), media_name)
    media_file_new = media_file + ".new"
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    snapshot_name = "s1"

    # Policy related parameters.
    disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml")
    disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(),
                                        "attach_policy_disk.xml")
    update_policy = "yes" == params.get("update_policy", "no")
    policy_only = "yes" == params.get("policy_only", "no")
    update_policy_list = params.get("update_policy_list").split()
    expect_value = [None, startup_policy]

    try:
        if disk_type == "volume":
            pvt = libvirt.PoolVolumeTest(test, params)
            vol_name, vol_path = create_volume(pvt, vol_name)
            vol_path_new = vol_path + ".new"

        # Create disk xml.
        create_disk_xml()
        if vm.is_alive():
            vm.destroy()
        try:
            # Backup disk xml file for policy update if update_policy=True.
            if update_policy:
                shutil.copyfile(disk_xml_file, disk_xml_policy_file)
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml_file,
                                         flagstr="--config",
                                         **virsh_dargs)
            # For an iSCSI pool volume, the startupPolicy attribute is not valid.
            # Moreover, setting 'requisite' is allowed only for cdrom or floppy disks.
            if pool_type == "iscsi" or all(
                [device_type == "disk", startup_policy == "requisite"]):
                libvirt.check_exit_status(result, expect_error=True)
                return
            else:
                libvirt.check_exit_status(result, expect_error=False)
        except Exception as attach_device_exception:
            logging.debug("Attach device threw an exception: %s",
                          str(attach_device_exception))
            os.remove(media_file)
            test.error("Attach %s failed" % device_type)
        # Check update policy operations.
        if disk_type == "file" and update_policy:
            vm.start()
            if policy_only:
                check_policy_update(startup_policy, update_policy_list,
                                    disk_xml_policy_file, device_type,
                                    attach_option)
            else:
                check_source_update(disk_xml_policy_file)
        elif disk_type == "file":
            # Step 1. Start domain and destroy it normally
            vm.start()
            vm.destroy()

            # Step 2. Remove the source_file then start the domain
            rename_file(media_file, media_file_new)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # For libvirt version >= 2.0.0, the feature was updated and the startupPolicy
            # attribute can not exist alone without the source protocol.
            if not start_error and not libvirt_version.version_compare(
                    2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 3. Move back the source file and start the domain(if needed).
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source file
            # and restore it back
            vm.save_to_file(save_file)
            rename_file(media_file, media_file_new)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)
            if not restore_error and not libvirt_version.version_compare(
                    2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 5. Move back the source file and restore the domain(if needed)
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                result = virsh.restore(save_file, **virsh_dargs)
                libvirt.check_exit_status(result, expect_error=False)
        elif disk_type == "volume":
            # Step 1. Start the domain and check the attached disk in the VM.
            vm.start()
            if not check_in_vm(old_parts):
                test.fail("Check disk partitions in VM failed")

            # Step 2. Destroy the VM, move the volume to another place, refresh the pool, then start the guest.
            vm.destroy()
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # Step 3 Move back the source file and start.
            rename_file(vol_path, vol_path_new, revert=True)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source file, then restore the domain.
            vm.save_to_file(save_file)
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)

            # Step 5. Create a snapshot, move the source to another place, then revert the snapshot.
            if device_type == "disk":
                rename_file(vol_path, vol_path_new, revert=True)
                cmd_result = virsh.pool_refresh(pool_name)
                libvirt.check_exit_status(cmd_result)
                if restore_error:
                    result = virsh.restore(save_file, **virsh_dargs)
                    libvirt.check_exit_status(result)
                ret = virsh.snapshot_create_as(vm_name, snapshot_name,
                                               **virsh_dargs)
                libvirt.check_exit_status(ret)
                rename_file(vol_path, vol_path_new)
                ret = virsh.snapshot_revert(vm_name, snapshot_name,
                                            **virsh_dargs)
                # Clean up snapshot.
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()

        if disk_type == "volume":
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **virsh_dargs)
        if os.path.exists(save_file):
            os.remove(save_file)
        if os.path.exists(disk_xml_file):
            os.remove(disk_xml_file)
        if os.path.exists(media_file):
            os.remove(media_file)
        if os.path.exists(disk_xml_policy_file):
            os.remove(disk_xml_policy_file)
コード例 #27
0
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

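    # The core operation exercised below is (illustrative):
    #   virsh blockpull <domain> <path> [--base <base>] [--wait --verbose]
    # which populates the active image of <path> from its backing chain.
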
    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: number of snapshots to take.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

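            # Each per-disk element written into the snapshot XML looks roughly
            # like this (illustrative):
            #   <disk name='vda' snapshot='external'>
            #     <driver type='qcow2'/>
            #     <source file='/path/<image>.snapN'/>
            #   </disk>
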
            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use a local file as the external snapshot target for
                        # block and iscsi network types.
                        # As a block device is treated as raw format by
                        # default, it is not suitable as an external disk
                        # snapshot target. A workaround is to run qemu-img
                        # again on the target.
                        # Also, external active snapshots are not supported on
                        # 'network' disks using the 'iscsi' protocol.
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a flag file in the VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get the disk source of the first disk device.

        :return: source of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files using relative paths.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

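        # Resulting layout, relative to the original image's directory
        # (illustrative):
        #   b/b.img  -> backing file ../<original image>
        #   c/c.img  -> backing file ../b/b.img
        #   d/d.img  -> backing file ../c/c.img
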
        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check relative-path backing chain files after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether backing files are expected.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path: %s doesn't include the origin "
                      "image: %s" % (first_disk_source, disk_src_file))
        # Validate whether the source image has backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
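        # Typical 'qemu-img info --backing-chain' output inspected below
        # (illustrative):
        #   image: /path/d.img
        #   file format: qcow2
        #   backing file: ../c/c.img (actual path: /path/c/c.img)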
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption, since the locking feature was added after qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Prepare a blank ceph config path so we know whether to delete it at the end of the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to contain the OS.
        # We will perform the blockpull operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt 1.2.4 and newer, or when the source protocol is gluster,
            # the base image is referenced by index; which index to use depends on
            # the base option: shallow, base or top.
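            # e.g. base_image "vda[1]" refers to the image at index 1 in the
            # backing chain of target device vda.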
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with the --keep-relative flag and reset base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                # After three or more block pull operations, base_index needs to be reset to 1.
                if count >= 3:
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = "  --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return
        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If pull job aborted as timeout, the exit status is different
        # on RHEL6(0) and RHEL7(1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove the ceph config file if it was created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
コード例 #28
0
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug, find_device(vm, params)
                                   ), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be detached
        :param device_name: Device name to be detached
        :param unplug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(xml, params['controller_model'])
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
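        # Restart libvirtd, then verify the domain can still be saved and restored.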
        process.system("systemctl restart libvirtd")
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    if guest_src_url:

        def _download():
            download_cmd = ("wget %s -O %s" % (guest_src_url, target_path))
            if process.system(download_cmd, shell=True):
                test.error("Failed to download file")

        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            if utils_package.package_install("wget"):
                utils_misc.wait_for(_download, timeout=360)
            else:
                test.error("Fail to install wget")
        params["blk_source_name"] = target_path

    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': 'pcie-to-pci-bridge'
            }
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
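        # '%0#4x' renders the bridge index as a hex bus number (e.g. 0x03),
        # matching the 'bus' attribute used in PCI addresses below.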
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'], 'address',
                                          args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
コード例 #29
0
                raise exceptions.TestFail("VM failed to start."
                                          " Error: %s" % str(e))
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    raise exceptions.TestFail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(test.tmpdir, "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
コード例 #30
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
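            # The host CPU vendor is reported in the capabilities XML,
            # e.g. "<vendor>Intel</vendor>".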
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)
    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
コード例 #31
0
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of virt-xml-validate command.
    try:
        VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")
    except ValueError:
        raise error.TestNAError("Cannot find virt-xml-validate command on host.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    net_name = params.get("net_dumpxml_name", "default")
    pool_name = params.get("pool_dumpxml_name", "default")
    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    valid_schemas = [
        'domain', 'domainsnapshot', 'network', 'storagepool', 'storagevol',
        'nodedev', 'capability', 'nwfilter', 'secret', 'interface'
    ]
    if schema not in valid_schemas:
        raise error.TestFail("invalid %s specified" % schema)

    virsh_dargs = {'ignore_status': True, 'debug': True}
    if schema == "domainsnapshot":
        domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs)
    elif schema == "network":
        network_validate(net_name, file=output_path, **virsh_dargs)
    elif schema == "storagepool":
        storagepool_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "storagevol":
        storagevol_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "nodedev":
        nodedev_validate(file=output_path, **virsh_dargs)
    elif schema == "capability":
        capability_validate(file=output_path, **virsh_dargs)
    elif schema == "nwfilter":
        nwfilter_validate(file=output_path, **virsh_dargs)
    elif schema == "secret":
        secret_validate(file=output_path, **virsh_dargs)
    elif schema == "interface":
        interface_validate(file=output_path, **virsh_dargs)
    else:
        # domain
        virsh.dumpxml(vm_name, to_file=output_path)

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)

    # Delete snapshots.
    snapshot_lists = virsh.snapshot_list(vm_name)
    if len(snapshot_lists) > 0:
        libvirt.clean_up_snapshots(vm_name, snapshot_lists)
        for snapshot in snapshot_lists:
            virsh.snapshot_delete(vm_name, snapshot, "--metadata")
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n"
                             "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("xml fails to validate\n"
                             "Detail: %s." % cmd_result)
コード例 #32
0
File: libvirt_mem.py Project: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test memory device (dimm) attach/detach and related memory settings.

    1. Prepare test environment, destroy or suspend a VM.
    2. Modify the domain xml (memory, numa cells, hugepages).
    3. Start the domain and attach/detach memory devices.
    4. Perform test operation.
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepage, string type
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            if libvirt_version.version_compare(7, 3, 0):
                cmd = cmd + " | grep " + '\\"discard-data\\":true'
            else:
                cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = r'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = r'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
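                # The assembled grep patterns check that qemu was started with
                # a memory-backend object of the expected id and size for the
                # hotplugged dimm (or the per-NUMA-node backend).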

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check whether the memory values are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
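            # 262144 KiB == 256 MiB, so a non-zero remainder means the value
            # is not aligned to the 256 MiB boundary.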
            if dom_mem[key] % 262144:
                logging.error('%s not align to 256', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory not align to 256')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)
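        # Retry the restore for up to 30s; _wait_for_restore only returns
        # True once virsh restore succeeds.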

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if max_mem:
            vmxml.max_mem = int(max_mem)
        if cur_mem:
            vmxml.current_mem = int(cur_mem)
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family')\
            else cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        [x.update({'size': pg_size}) for x in huge_pages]
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if a memory device will be attached
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If an error is expected, the command's error status is only
                # checked on the last iteration
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500 MB
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait for the vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait for the vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
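                        # Known errors may be quoted in the cfg file; strip the
                        # surrounding single quotes before matching.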
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred on host during"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # No 'Offline failed' message in dmesg, so the
                            # unplug failure is unexpected.
                            os.remove(dmesg_file)
                            test.fail(detail)
                        # The attached memory is in use by the guest and could
                        # not be offlined; this failure is expected.
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)
    except xcepts.LibvirtXMLError as details:
        if not define_error:
            test.fail("Unexpected LibvirtXMLError: %s" % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
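
The detach loop above retries virsh detach-device and then inspects guest dmesg for known memory-offline failures. As a standalone illustration of the underlying operation, here is a minimal, hypothetical sketch of hot-plugging and hot-unplugging a dimm memory device with plain virsh calls; the domain name and size are placeholders, and the guest is assumed to already define <maxMemory> and a NUMA topology.

import subprocess
import tempfile

# Standalone sketch, not part of the test above; values are hypothetical.
DOMAIN = "testvm"
DIMM_XML = """<memory model='dimm'>
  <target>
    <size unit='MiB'>512</size>
    <node>0</node>
  </target>
</memory>
"""

def run_virsh(*args):
    """Run a virsh command and return the completed process."""
    return subprocess.run(["virsh"] + list(args),
                          capture_output=True, text=True)

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(DIMM_XML)
    dev_xml = f.name

# Hot-plug the dimm into the running guest, then hot-unplug it again.
print(run_virsh("attach-device", DOMAIN, dev_xml, "--live").stdout)
print(run_virsh("detach-device", DOMAIN, dev_xml, "--live").stdout)
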
Code example #33
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target (and an iscsi pool/volume for volume disks)
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait until the domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
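
For reference, a minimal, hypothetical sketch of roughly the kind of iscsi network disk XML that libvirt.create_disk_xml produces in the example above, attached and detached here with plain virsh calls; the IQN, host and target device are placeholders.

import subprocess
import tempfile

# Standalone sketch, not part of the test above; values are hypothetical.
DOMAIN = "testvm"
ISCSI_DISK_XML = """<disk type='network' device='disk'>
  <driver name='qemu' type='raw'/>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/0'>
    <host name='127.0.0.1' port='3260'/>
  </source>
  <target dev='vdb' bus='virtio'/>
</disk>
"""

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(ISCSI_DISK_XML)
    disk_file = f.name

# Attach the disk to the running guest, then detach it again by target name.
subprocess.run(["virsh", "attach-device", DOMAIN, disk_file, "--live"],
               check=True)
subprocess.run(["virsh", "detach-disk", DOMAIN, "vdb", "--live"], check=True)
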
Code example #34
def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: default is False.
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            if blockcopy_option in ['reuse_external']:
                device_source = libvirt.create_local_disk(
                    backend_storage_type, disk_path, storage_size,
                    device_format)
            else:
                device_source = disk_path
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            checkout_device_source = image_filename
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
                checkout_device_source = device_source
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
                checkout_device_source = 'emulated-iscsi'
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            size = "0.15"

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Keep an empty value so cleanup knows whether a ceph config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       ignore_status=False,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            if blockcopy_option in ['reuse_external']:
                # Create a local image and make a filesystem on it.
                libvirt.create_local_disk("file", img_file, storage_size,
                                          device_format)
                # Convert the image to remote storage
                disk_path = ("rbd:%s:mon_host=%s" %
                             (ceph_disk_name, ceph_mon_ip))
                if ceph_client_name and ceph_client_key:
                    disk_path += (":id=%s:key=%s" %
                                  (ceph_auth_user, ceph_auth_key))
                rbd_cmd = (
                    "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                    " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                   device_format, img_file, disk_path))
                process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
            checkout_device_source = ceph_disk_name
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = socket.gethostname().strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)
            checkout_device_source = image_path
            if blockcopy_option in ['pivot']:
                ignore_check = True

        logging.debug("device source is: %s", device_source)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        auth_in_source = True
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name,
                                     tmp_device_source,
                                     device_target,
                                     "--config",
                                     debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Run blockcopy with the prepared destination disk XML
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options,
                                 debug=True)
        libvirt.check_exit_status(result)
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Clean up nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
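
The test above exercises the --xml form of blockcopy, where the destination is described by a full <disk> element. As a simpler point of comparison, here is a hedged sketch of the plain path-based form; the domain, target and destination path are placeholders.

import subprocess

# Standalone sketch with hypothetical values; the test above passes a <disk>
# XML via --xml instead of a plain destination path.
DOMAIN = "testvm"
TARGET = "vdd"
DEST = "/var/lib/libvirt/images/vdd-copy.raw"

# Copy the disk and pivot the guest onto the new image once the copy completes.
subprocess.run(["virsh", "blockcopy", DOMAIN, TARGET, "--dest", DEST,
                "--transient-job", "--wait", "--verbose", "--pivot"],
               check=True)
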
Code example #35
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target (and an iscsi pool/volume for volume disks)
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd.encode()).decode()
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait until the domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
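
Both iscsi examples define a libvirt secret to hold the CHAP password before attaching the disk. A minimal, hypothetical sketch of that flow with plain virsh calls follows; the usage target and password are placeholders, and the tests themselves build the same XML through the SecretXML helper instead.

import base64
import subprocess
import tempfile

# Standalone sketch, not part of the tests above; values are hypothetical.
USAGE_TARGET = "libvirtiscsi"
CHAP_PASSWORD = "password"
SECRET_XML = """<secret ephemeral='no' private='yes'>
  <usage type='iscsi'>
    <target>%s</target>
  </usage>
</secret>
""" % USAGE_TARGET

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(SECRET_XML)
    secret_file = f.name

# Define the secret, parse its uuid from "Secret <uuid> created", then store
# the base64-encoded CHAP password as the secret value.
out = subprocess.run(["virsh", "secret-define", secret_file],
                     capture_output=True, text=True, check=True).stdout
secret_uuid = out.split()[1]
encoded = base64.b64encode(CHAP_PASSWORD.encode()).decode()
subprocess.run(["virsh", "secret-set-value", secret_uuid, encoded], check=True)
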
Code example #36
File: tpm_device.py  Project: pombredanne/tp-libvirt
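
Before the test code, a small reference sketch of the two TPM backend flavours it exercises, expressed as libvirt <tpm> device XML. Both snippets would sit inside the <devices> element of the domain XML; the device path and versions are only illustrative.

# Standalone sketch, not part of the test below; values are illustrative.

# Host TPM passed through to the guest (requires /dev/tpm0 on the host).
TPM_PASSTHROUGH_XML = """<tpm model='tpm-tis'>
  <backend type='passthrough'>
    <device path='/dev/tpm0'/>
  </backend>
</tpm>
"""

# Emulated TPM 2.0 (vTPM) backed by a per-domain swtpm process.
TPM_EMULATOR_XML = """<tpm model='tpm-crb'>
  <backend type='emulator' version='2.0'/>
</tpm>
"""
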
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, even swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported "
                    "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() returns True on success
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    test.error("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install("swtpm*"):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {"loader_readonly": "yes",
                         "secure": "yes",
                         "loader_type": "pflash",
                         "loader": loader,
                         "nvram": nvram}
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                        True, timeout=360, debug=True)
            if ret_clone.exit_status:
                test.error("Need more than one domains, but error occured when virt-clone.")
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s backend version xml for tpm dev "
                          "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append("-chardev.socket,id=chrtpm,"
                                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" % pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail('swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough.
        Since dmesg info sometimes doesn't include tpm messages, tpm-tools or
        tpm2-tools need to be used to exercise the device.

        :param tpm_v: host tpm version obtained from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # tpm_v got nothing from dmesg; log the failure here and
                    # fall through to the next 'if' to try the tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version: the host version for
                               passthrough, or the version specified for emulator
        :param session: Guest session to be tested
        :param expect_fail: whether the guest tpm is expected to fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output("systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel("tpm-crb passthrough only works with host tpm2.0, "
                                    "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        # Compare (major, minor) numerically: float("5.10") would parse as 5.1
        if (int(boot_info[0]), int(boot_info[1])) < (5, 3):
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build the test suite
        if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # The test suite scripts support python2, so make them use python2 explicitly
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is being used by the first guest.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail("Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s", tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {"sec_ephemeral": "no",
                                         "sec_private": "yes",
                                         "sec_desc": "sample vTPM secret",
                                         "sec_usage": "vtpm",
                                         "sec_name": "VTPM_example"}
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For the default model, there is no need to start the guest for testing
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure the OS works before the vm operation or libvirtd restart
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or change the encryption state: from plain to encrypted, or vice versa
                                if encrypt_change == 'plain':
                                    # Change from encrypted state to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Use define() instead of sync() so the undefine step does not remove the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name, ignore_status=True, debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
Code example #37
0
def run(test, params, env):
    """
    Test virsh blockpull with various options on VM.

    1. Prepare backend storage (iscsi, nbd, file, block)
    2. Start VM
    3. Execute virsh blockpull target command
    4. Check status after the operation is accomplished
    5. Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_iscsi_block_env(params):
        """
        Setup iscsi as block test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("emulated_size", "10G")
        chap_user = params.get("iscsi_user")
        chap_passwd = params.get("iscsi_password")
        auth_sec_usage_type = params.get("secret_usage_type")
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            chap_passwd.encode(encoding)).decode(encoding)

        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")

        auth_sec_uuid = libvirt_ceph_utils._create_secret(
            auth_sec_usage_type, secret_string)
        disk_auth_dict = {
            "auth_user": chap_user,
            "secret_type": auth_sec_usage_type,
            "secret_uuid": auth_sec_uuid
        }

        disk_src_dict = {'attrs': {'dev': device_source}}
        iscsi_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, disk_auth_dict)
        # Add disk xml.
        logging.debug("disk xml is:\n%s" % iscsi_disk)
        # Sync VM xml.
        vmxml.add_device(iscsi_disk)
        vmxml.sync()

    def setup_file_env(params):
        """
        Setup file test environment

        :param params: one dict to wrap up parameters
        """
        # If additional_disk is False, there is no need to create an additional disk
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        backstore_image_target_path = params.get("backstore_image_name")
        tmp_blkpull_path.append(backstore_image_target_path)
        libvirt.create_local_disk("file", backstore_image_target_path, "1",
                                  "qcow2")
        backing_chain_list.append(backstore_image_target_path)

        disk_src_dict = {"attrs": {"file": backstore_image_target_path}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def _generate_backstore_attribute(params):
        """
        Create one disk with backingStore attribute by creating snapshot

        :param params: one dict to wrap up parameters
        """
        device_target = params.get("virt_disk_device_target")
        top_file_image_name = params.get("top_file_image_name")
        second_file_image_name = params.get("second_file_image_name")
        tmp_blkpull_path.append(top_file_image_name)
        tmp_blkpull_path.append(second_file_image_name)
        backing_chain_list.append(top_file_image_name)
        if vm.is_dead():
            vm.start()
        snapshot_tmp_name = "blockpull_tmp_snap"
        options = " %s --disk-only --diskspec %s,file=%s" % (
            snapshot_tmp_name, 'vda', second_file_image_name)
        options += " --diskspec %s,file=%s" % (device_target,
                                               top_file_image_name)
        virsh.snapshot_create_as(vm_name,
                                 options,
                                 ignore_status=False,
                                 debug=True)
        vm.destroy()
        virsh.snapshot_delete(vm_name,
                              snapshot_tmp_name,
                              "--metadata",
                              ignore_status=False,
                              debug=True)
        vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("backstore prepare readiness :\n%s", vmxml_dir)

    def setup_block_env(params):
        """
        Setup block test environment

        :param params: one dict to wrap up parameters
        """
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
        disk_src_dict = {'attrs': {'dev': device_source}}
        backing_chain_list.append(device_source)

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def setup_nbd_env(params):
        """
        Setup nbd test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port", "10001")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        enable_ga_agent = "yes" == params.get("enable_ga_agent", "no")

        # Create NbdExport object
        nbd = NbdExport(image_path,
                        image_format=device_format,
                        port=nbd_server_port)
        nbd.start_nbd_server()

        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % "no"}

        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})

        network_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)

        logging.debug("disk xml is:\n%s" % network_disk)
        # Sync VM xml.
        vmxml.add_device(network_disk)
        vmxml.sync()
        if enable_ga_agent:
            vm.prepare_guest_agent()
            vm.destroy(gracefully=False)

    def check_chain_backing_files(chain_list, disk_target):
        """
        Check chain backing files

        :param chain_list: list, expected backing chain list
        :param disk_target: disk target
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n",
                      disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
        backingstore_list.pop()
        parse_source_file_list = [
            elem.find('source').get('file') or elem.find('source').get('name')
            or elem.find('source').get('dev') for elem in backingstore_list
        ]
        logging.debug("before expected backing chain list is %s", chain_list)
        chain_list = chain_list[0:3]
        if backend_storage_type == "nbd":
            chain_list = chain_list[0:1]
        chain_list = chain_list[::-1]
        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether the two lists are equal
        if blockpull_option in ['keep_relative']:
            if parse_source_file_list[-1] != chain_list[-1]:
                test.fail(
                    "the last element of the parsed backing chain does not match the expected one"
                )
        elif parse_source_file_list != chain_list:
            test.fail("the parsed backing chain does not match the expected one")

    def _extend_blkpull_execution(base=None,
                                  status_error=False,
                                  err_msg=None,
                                  expected_msg=None):
        """
        Wrap up blockpull execution combined with various options

        :param base: specific base
        :param status_error: whether an error is expected
        :param err_msg: error message if the blockpull command fails
        :param expected_msg: expected jobinfo message, if checked
        """
        blockpull_options = params.get("options")
        if '--base' in blockpull_options:
            if base:
                blockpull_options = params.get("options") % base
            else:
                blockpull_options = params.get(
                    "options") % external_snapshot_disks[0]
        result = virsh.blockpull(vm_name,
                                 device_target,
                                 blockpull_options,
                                 ignore_status=True,
                                 debug=True)
        libvirt.check_exit_status(result, expect_error=status_error)
        if status_error:
            if err_msg not in result.stdout_text and err_msg not in result.stderr_text:
                test.fail(
                    "Can not find the failure message in stdout: %s or stderr: %s"
                    % (result.stdout_text, result.stderr_text))
        res = virsh.blockjob(vm_name, device_target, "--info").stdout.strip()
        logging.debug("virsh jobinfo is :%s\n", res)
        if expected_msg:
            job_msg = expected_msg
        else:
            job_msg = "No current block job for %s" % device_target
        if res and job_msg not in res:
            test.fail("Find unexpected block job information in %s" % res)

    def start_async_blkpull_on_vm():
        """Start blockpull with async"""
        context_msg = "Pull aborted"
        _extend_blkpull_execution(None, True, context_msg)

    def start_bandwidth_blkpull_on_vm():
        """Start blockpull with bandwidth option """
        _extend_blkpull_execution()

    def start_timeout_blkpull_on_vm():
        """Start blockpull with timeout option """
        _extend_blkpull_execution()

    def start_middle_to_top_to_base_on_vm():
        """Start blockpull from middle to top """
        _extend_blkpull_execution()
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_reuse_external_blkpull_on_vm():
        """Start blockpull with reuse_external """
        _extend_blkpull_execution(base=backing_chain_list[1])
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_top_as_base_blkpull_on_vm():
        """Start blockpull with top as base """
        error_msg = "error: invalid argument"
        _extend_blkpull_execution(base=backing_chain_list[-1],
                                  status_error=True,
                                  err_msg=error_msg)

    def start_base_to_top_blkpull_on_vm():
        """Start blockpull with base as top """
        _extend_blkpull_execution()

    def start_middletotop_blkpull_on_vm():
        """start middletotop blockpull on vm """
        _extend_blkpull_execution()
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))

    # Disk specific attributes.
    type_name = params.get("virt_disk_device_type")
    disk_device = params.get("virt_disk_device")
    device_target = params.get("virt_disk_device_target")
    device_bus = params.get("virt_disk_device_bus")
    device_format = params.get("virt_disk_device_format")

    blockpull_option = params.get("blockpull_option")
    options_value = params.get("options_value")
    backend_storage_type = params.get("backend_storage_type")
    backend_path = params.get("backend_path")
    additional_disk = "yes" == params.get("additional_disk", "yes")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))
    fill_in_vm = "yes" == params.get("fill_in_vm", "no")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    pre_set_root_dir = os.path.dirname(first_src_file)
    replace_disk_image = None

    # Additional disk images.
    tmp_dir = data_dir.get_data_dir()
    tmp_blkpull_path = []
    disks_img = []
    external_snapshot_disks = []
    attach_disk_xml = None
    backing_chain_list = []

    # Initialize one NbdExport object
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        utils_secret.clean_up_secrets()

        # Setup backend storage
        if backend_storage_type == "iscsi":
            setup_iscsi_block_env(params)
        elif backend_storage_type == "file":
            setup_file_env(params)
        elif backend_storage_type == "block":
            setup_block_env(params)
        elif backend_storage_type == "nbd":
            setup_nbd_env(params)
        try:
            vm.start()
            vm.wait_for_login().close()
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if fill_in_vm:
            libvirt_disk.fill_null_in_vm(vm, device_target)

        if backend_path in ['native_path']:
            external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
                vm, device_target, "blockpull_snapshot", snapshot_take)
            backing_chain_list.extend(external_snapshot_disks)
        elif backend_path in ['reuse_external']:
            replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
                vm, pre_set_root_dir, first_src_file, device_format)
            params.update({
                'disk_source_name': replace_disk_image,
                'disk_type': 'file',
                'disk_format': 'qcow2',
                'disk_source_protocol': 'file'
            })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if blockpull_option in ['middle_to_top']:
            start_middletotop_blkpull_on_vm()

        if blockpull_option in ['async']:
            start_async_blkpull_on_vm()

        if blockpull_option in ['bandwidth']:
            start_bandwidth_blkpull_on_vm()

        if blockpull_option in ['timeout']:
            start_timeout_blkpull_on_vm()

        if blockpull_option in ['middle_to_top_to_base']:
            start_middle_to_top_to_base_on_vm()

        if blockpull_option in ['keep_relative']:
            start_reuse_external_blkpull_on_vm()

        if blockpull_option in ['top_as_base']:
            start_top_as_base_blkpull_on_vm()

        if blockpull_option in ['base_to_top']:
            start_base_to_top_blkpull_on_vm()
    finally:
        # Recover VM.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        utils_secret.clean_up_secrets()
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Delete reuse external disk if exists
        for disk in external_snapshot_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up backend storage
        for tmp_path in tmp_blkpull_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if nbd:
            nbd.cleanup()
        # Clean up created folders
        for folder in [
                chr(letter) for letter in range(ord('a'),
                                                ord('a') + 4)
        ]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)
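
As a quick reference, a minimal sketch of the bare blockpull call and block-job check that the helpers above wrap; vm_name and device_target are assumed to already point at a running guest with a backing chain, and the usual virttest imports are assumed.

# Minimal sketch, assuming vm_name/device_target (e.g. "vdb") are already prepared.
from virttest import virsh
from virttest.utils_test import libvirt

result = virsh.blockpull(vm_name, device_target,
                         "--wait --verbose",  # pull the whole backing chain into the top image
                         ignore_status=True, debug=True)
libvirt.check_exit_status(result)

# Once the pull finishes there should be no active block job left on the disk.
res = virsh.blockjob(vm_name, device_target, "--info").stdout.strip()
if res and "No current block job for %s" % device_target not in res:
    raise AssertionError("unexpected block job info: %s" % res)
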
Code example #38
0
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: snapshots taken.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use a local file as the external snapshot target for
                        # block and iscsi network types.
                        # A block device is treated as raw format by default,
                        # which does not fit an external disk snapshot target.
                        # A workaround is to run qemu-img again on the target.
                        # Also, external active snapshots are not supported on
                        # 'network' disks using the 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get the disk source of the first device
        :return: source of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
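        # '{b..d}' relies on bash-style brace expansion (the command runs with
        # shell=True), creating the b, c and d sub-directories in one call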
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate that the source image refers to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption, since the locking feature was added after qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Prepare a blank ceph config path so the cleanup code can tell whether to delete it at the end of the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt 1.2.4 and newer, or when the source protocol is gluster,
            # the base image is given as disk[index]; the index depends on the base
            # option: shallow, base or top respectively
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with the --keep-relative flag, and reset base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                # If there are three or more block pull operations, base_index needs to be reset to 1.
                if count >= 3:
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = "  --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return
        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If pull job aborted as timeout, the exit status is different
        # on RHEL6(0) and RHEL7(1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            logging.info("clean gluster env")
            libvirt.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
Code Example #39
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def check_rng_xml(xml_set, exists=True):
        """
        Check rng xml in/not in domain xml
        :param xml_set: rng xml object for setting
        :param exists: Check xml exists or not in domain xml

        :return: boolean
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        # Check whether the attributes that were set match the xml read back
        try:
            rng_index = xml_devices.index(rng_devices[0])
            xml_get = xml_devices[rng_index]

            if not exists:
                # should be detach device check
                return False
        except IndexError:
            if exists:
                # should be attach device check
                return False
            else:
                logging.info("Can not find rng xml as expected")
                return True

        def get_compare_values(xml_set, xml_get, rng_attr):
            """
            Get set and get value to compare

            :param xml_set: setting xml object
            :param xml_get: getting xml object
            :param rng_attr: attribute of rng device
            :return: set and get value in xml
            """
            try:
                set_value = xml_set[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                set_value = None
            try:
                get_value = xml_get[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                get_value = None
            logging.debug("get xml_set value(%s) is %s, get xml_get value is %s",
                          rng_attr, set_value, get_value)
            return (set_value, get_value)

        match = True
        for rng_attr in xml_set.__slots__:
            set_value, get_value = get_compare_values(xml_set, xml_get, rng_attr)
            logging.debug("rng_attr=%s, set_value=%s, get_value=%s", rng_attr, set_value, get_value)
            if set_value and set_value != get_value:
                if rng_attr == 'backend':
                    for bak_attr in xml_set.backend.__slots__:
                        set_backend, get_backend = get_compare_values(xml_set.backend, xml_get.backend, bak_attr)
                        if set_backend and set_backend != get_backend:
                            if bak_attr == 'source':
                                set_source = xml_set.backend.source
                                get_source = xml_get.backend.source
                                find = False
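                                # Every source element that was set must match
                                # (as an attribute subset) one of the sources
                                # read back from the domain xml.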
                                for i in range(len(set_source)):
                                    for j in get_source:
                                        if set(set_source[i].items()).issubset(j.items()):
                                            find = True
                                            break
                                    if not find:
                                        logging.debug("set source(%s) not in get source(%s)",
                                                      set_source[i], get_source)
                                        match = False
                                        break
                                    else:
                                        continue
                            else:
                                logging.debug("set backend(%s)- %s not equal to get backend-%s",
                                              rng_attr, set_backend, get_backend)
                                match = False
                                break
                        else:
                            continue
                        if not match:
                            break
                else:
                    logging.debug("set value(%s)-%s not equal to get value-%s",
                                  rng_attr, set_value, get_value)
                    match = False
                    break
            else:
                continue
            if not match:
                break

        if match:
            logging.info("Find same rng xml as hotpluged")
        else:
            test.fail("Rng xml in VM not same with attached xml")

        return True

    def modify_rng_xml(dparams, sync=True, get_xml=False):
        """
        Modify rng device xml options

        :params dparams: parameters for organize xml
        :params sync: whether sync to domain xml, if get_xml is True,
                      then sync will not take effect
        :params get_xml: whether get device xml
        :return: if get_xml=True, return xml file
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        backend_protocol = dparams.get("backend_protocol")
        rng_alias = dparams.get("rng_alias")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
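            # Backend sources are passed as python dict literals in the params;
            # keys such as 'mode', 'host' and 'service' are consumed later by
            # check_qemu_cmd.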
            source_list = [ast.literal_eval(source) for source in
                           backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        if detach_alias:
            rng_xml.alias = dict(name=rng_alias)

        logging.debug("Rng xml: %s", rng_xml)
        if get_xml:
            return rng_xml
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
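            # Append the rng xml right after the <devices> open tag through
            # "virsh edit" using an ex-style substitution; forward slashes in
            # the xml must be escaped for the pattern.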
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", "\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device %s'" % dparams.get("rng_device"))
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't see rng option"
                      " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout_text.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" %
                          ret.stdout_text)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1, debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)), debug=True)
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name, debug=True)
        if ret.stdout.strip().count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2, debug=True)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name, debug=True)
        if not ret.stdout.strip().count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest_dump(session, exists=True):
        """
        Check guest with hexdump

        :param session: ssh session to guest
        :param exists: check rng device exists/not exists
        """
        check_cmd = "hexdump /dev/hwrng"
        try:
            status = session.cmd_status(check_cmd, 5)

            if status != 0 and exists:
                test.fail("Fail to check hexdump in guest")
            elif not exists:
                logging.info("hexdump cmd failed as expected")
        except aexpect.exceptions.ShellTimeoutError:
            if not exists:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("Hexdump do not fail with error")

    def check_guest(session, expect_fail=False):
        """
        Check random device on guest

        :param session: ssh session to guest
        :param expect_fail: expect the dd cmd pass or fail
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=timeout).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=timeout).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        rng_rate = params.get("rng_rate")
        # For the rng rate test this command returns in a short time,
        # but for other tests it will hang
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        try:
            ret, output = session.cmd_status_output(cmd, timeout=timeout)
            if ret and expect_fail:
                logging.info("dd cmd failed as expected")
            elif ret:
                test.fail("Failed to read the random device")
        except aexpect.exceptions.ShellTimeoutError:
            logging.info("dd cmd timeout")
            # Close session as the current session still hang on last cmd
            session.close()
            session = vm.wait_for_login()

            if expect_fail:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("dd cmd do not fail with error")
                # Check if file have data
                size = session.cmd_output("wc -c rng.test").split()[0]
                if int(size) > 0:
                    logging.info("/dev/hwrng is not empty, size %s", size)
                else:
                    test.fail("/dev/hwrng is empty")
        finally:
            session.cmd("rm -f rng.test")

        if rng_rate:
            rate_bytes, rate_period = list(ast.literal_eval(rng_rate).values())
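            # The rate period in the xml is in milliseconds, so convert the
            # configured limit to bytes per second before comparing it with
            # the measured dd throughput.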
            rate_conf = float(rate_bytes) / (float(rate_period)/1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s",
                            output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.cancel("Multiple virtio-rng devices are not"
                            " supported on this guest kernel. "
                            "Bug: https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=timeout):
                test.fail("Failed to read the random device")

    def get_rng_device(guest_arch, rng_model):
        """
        Return the expected rng device in qemu cmd
        :param guest_arch: e.g. x86_64
        :param rng_model: the value for //rng@model, e.g. "virtio"
        :return: expected device type in qemu cmd
        """
        if "virtio" in rng_model:
            return "virtio-rng-pci" if "s390x" not in guest_arch else "virtio-rng-ccw"
        else:
            test.fail("Unknown rng model %s" % rng_model)

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_guest_dump = "yes" == params.get("test_guest_dump", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running",
                                              "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))
    detach_alias = "yes" == params.get("rng_detach_alias", "no")
    detach_alias_options = params.get("rng_detach_alias_options")
    attach_rng = "yes" == params.get("rng_attach_device", "no")
    attach_options = params.get("rng_attach_options", "")
    random_source = "yes" == params.get("rng_random_source", "yes")
    timeout = int(params.get("timeout", 600))
    wait_timeout = int(params.get("wait_timeout", 60))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.skip("Multiple virtio-rng devices not "
                  "supported on this libvirt version")

    guest_arch = params.get("vm_arch_name", "x86_64")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Try to install rng-tools on host, it can speed up random rate
    # if installation failed, ignore the error and continue the test
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s"
                        % rngd_srv_conf, shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None

        # Prepare xml, make sure no extra rng dev.
        vmxml = vmxml_backup.copy()
        vmxml.remove_all_device_by_type('rng')
        vmxml.sync()
        logging.debug("Prepared vm xml without rng dev is %s", vmxml)

        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name, debug=True)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in xrange(device_num):
                rng_model = params.get("rng_model_%s" % i, "virtio")
                dparams[i] = {"rng_model": rng_model}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                dparams[i].update({"rng_device": get_rng_device(
                    guest_arch, rng_model)})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            params.update({"rng_device": get_rng_device(
                guest_arch, params.get("rng_model", "virtio"))})

            if detach_alias:
                device_alias = "ua-" + str(uuid.uuid4())
                params.update({"rng_alias": device_alias})

            rng_xml = modify_rng_xml(params, not test_snapshot, attach_rng)

        try:
            # Add random server
            if random_source and params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)

            vm.start()
            if attach_rng:
                ret = virsh.attach_device(vm_name, rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True, ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
                if not check_rng_xml(rng_xml, True):
                    test.fail("Can not find rng device in xml")

            else:
                # The VM already started successfully above.
                if start_error:
                    test.fail("VM started unexpectedly")

            if test_qemu_cmd and not attach_rng:
                if device_num > 1:
                    for i in xrange(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            if test_guest_dump:
                check_guest_dump(session, True)
            if test_snapshot:
                check_snapshot(bgjob)

            if detach_alias:
                result = virsh.detach_device_alias(vm_name, device_alias,
                                                   detach_alias_options, debug=True)
                if "--config" in detach_alias_options:
                    vm.destroy()

                def have_rng_xml():
                    """
                    Check that the rng device has been removed from the domain xml
                    """
                    output = virsh.dumpxml(vm_name)
                    return not output.stdout.strip().count("<rng model=")

                if utils_misc.wait_for(have_rng_xml, wait_timeout):
                    logging.info("Cannot find rng device in xml after detach")
                else:
                    test.fail("Found rng device in xml after detach")

            # Detach after attach
            if attach_rng:
                ret = virsh.detach_device(vm_name, rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True, ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if utils_misc.wait_for(lambda: check_rng_xml(rng_xml, False), wait_timeout):
                    logging.info("Find same rng xml as hotpluged")
                else:
                    test.fail("Rng device still exists after detach!")

                if test_guest_dump:
                    check_guest_dump(session, False)

            session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name, debug=True)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata", debug=True)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
Code Example #40
File: vcpu_misc.py  Project: vivianQizhu/tp-libvirt
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list."
                      % snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")
    test_operations = params.get("test_operations")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode

        # Update vm's cpu
        vmxml.cpu = cpu_xml

        vmxml.sync()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result)

        if expected_str_after_startup:
            libvirt.check_dumpxml(vm, expected_str_after_startup)

    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
Code Example #41
def run(test, params, env):
    """
    Test memory device attach/detach operations.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare memory device xml.
    3.Edit domain xml and start the domain.
    4.Perform attach/detach operation and verify the result.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file on it;
        # this is what actually consumes guest memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages to allocate
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
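            # The pattern matches either a dimm backend (memdimm*) or a numa
            # node backend (ram-node*); the single quote opened after size= is
            # closed at the end, once the optional host-nodes/policy/slot
            # parts have been appended.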
            cmd_str = 'memdimm.\|memory-backend-ram,id=ram-node.'
            cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s"
                    % (cmd_str, size))
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem), 30, first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if set memory align to 256
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            if dom_mem[key] % 256:
                logging.error('%s not align to 256', key)
                all_align = False

        if not all_align:
            test.fail('Memory not align to 256')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info('Check Pass: Memory is equal to (all numa memory + memory device)')
        else:
            test.fail('Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
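            # Each numa cell is given as a python dict literal in the params;
            # at least a "memory" key is expected (see the alignment below).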
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(len(cells)):
                    memory_value = str(utils_numeric.align_value(
                        cells[cell]["memory"],
                        align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        setup_hugepages(int(pg_size))

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first for host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain any way if attach memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device and not hot_plug:
            at_times = int(params.get("attach_times", 1))
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit, tg_node,
                                                   node_mask, mem_model)
            randvar = 0
            rand_value = random.randint(15, 25)
            logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If an error is expected, the command status should only
                # be checked on the last iteration
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)
                    vm.reboot()
                    vm.wait_for_login()

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                   mem_addr, tg_sizeunit,
                                                   pg_unit, tg_node,
                                                   node_mask, mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
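            # Record how much guest memory was gained by the hotplug; it is
            # compared later against the attached memory size when mem_align
            # is set.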
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail('Memory after attach not equal to original mem + attached mem')

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            if not dev_xml:
                dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                       mem_addr, tg_sizeunit,
                                                       pg_unit, tg_node,
                                                       node_mask, mem_model)
            for x in xrange(at_times):
                ret = virsh.detach_device(vm_name, dev_xml.xml,
                                          flagstr=attach_option)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
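                        # The known error strings from the configuration may
                        # be wrapped in single quotes; strip them before
                        # matching against stderr.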
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occured in Host, while"
                                          " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session, level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is used by vm, and it could not be unplugged
                            # The result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True, session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occured, while hot unplug"
                                          ": %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()