def start_pivot_blkcpy_on_transient_vm():
    """
    Start blockcopy with pivot option
    """
    external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
        vm, device_target, "trans_snapshot", snapshot_take)
    logging.debug("external snapshots:%s\n", external_snapshot_disks)
    # Drop the newest snapshot: it is not expected to appear in the
    # copy's backing chain.
    external_snapshot_disks.pop()
    for sub_option in ["--shallow --pivot", "--pivot"]:
        tmp_copy_path = os.path.join(
            data_dir.get_data_dir(),
            "%s_%s.img" % (vm_name, sub_option[2:5]))
        tmp_blkcopy_path.append(tmp_copy_path)
        if os.path.exists(tmp_copy_path):
            libvirt.delete_local_disk('file', tmp_copy_path)
        virsh.blockcopy(vm_name,
                        device_target,
                        tmp_copy_path,
                        options=sub_option,
                        ignore_status=False,
                        debug=True)
        back_chain_files = libvirt_disk.get_chain_backing_files(
            tmp_copy_path)
        back_chain_files = back_chain_files[1:]
        logging.debug("expected backing chain: %s, actual: %s\n",
                      external_snapshot_disks, back_chain_files)
        if back_chain_files != external_snapshot_disks:
            test.fail("backing chain of the copy does not match the expected one")
        utils_misc.wait_for(
            lambda: libvirt.check_blockjob(vm_name, device_target), 5)
        # After pivot, no backing chain exists
        external_snapshot_disks = []
def run(test, params, env):
    """
    Test scenarios: virsh blockcommit with relative path

    1) Prepare test environment.
    2) Create relative path backing chain
    3) Do virsh blockcommit
    4) Check result.
    5) Recover the environment
    """
    def check_chain_backing_files(disk_src_file, expect_backing_list):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_list: backing chain lists.
        """
        # Validate source image doesn't have backing files after active blockcommit
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        logging.debug("The actual qemu-img qemu_img_info_backing_chain:%s\n", qemu_img_info_backing_chain)
        logging.debug("The actual qemu-img expect_backing_list:%s\n", expect_backing_list)
        if qemu_img_info_backing_chain != expect_backing_list:
            test.fail("The backing files by qemu-img is not identical in expected backing list")

    def check_top_image_in_xml(expected_top_image):
        """
        check top image in src file

        :param expected_top_image: expect top image
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk.xmltreefile
                break
        logging.debug("disk xml in top: %s\n", disk_xml)
        for attr in ['file', 'name', 'dev']:
            src_file = disk_xml.find('source').get(attr)
            if src_file:
                break
        if src_file not in expected_top_image:
            test.fail("Current top img %s is not the same with expected: %s" % (src_file, expected_top_image))

    def check_blockcommit_with_bandwidth(chain_list):
        """
        Check blockcommit with bandwidth

        :param chain_list: list, expected backing chain list
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n", disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
        parse_source_file_list = [elem.find('source').get('file') or elem.find('source').get('name') for elem in backingstore_list]

        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether relative path has been kept
        for i in range(0, len(chain_list)-1):
            if chain_list[i] not in parse_source_file_list[i]:
                test.fail("The relative path parsed from disk xml is different with pre-expected ones")

    def check_file_not_exists(root_dir, file_name, reverse=False):
        """
        Check whether file exists in certain folder

        :param root_dir: preset root directory
        :param file_name:  input file name
        :param reverse: whether reverse the condition
        """
        files_path = [os.path.join(root_dir, f) for f in os.listdir(root_dir)
                      if os.path.isfile(os.path.join(root_dir, f))
                      ]
        logging.debug("all files in folder: %s \n", files_path)
        if not files_path:
            test.fail("Failed to get snapshot files in preset folder")
        elif reverse:
            if file_name not in files_path:
                test.fail("snapshot file %s cannot be found" % file_name)
        else:
            if file_name in files_path:
                test.fail("snapshot file %s should have been deleted" % file_name)

    def check_backing_chain_file_not_exists(disk_src_file, file_name, reverse=False):
        """
        Check whether file exists in source file's backing chain

        :param disk_src_file: disk source with backing chain files
        :param file_name: input file name
        :param reverse: whether reverse this condition
        """
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        if reverse:
            if file_name not in qemu_img_info_backing_chain:
                test.fail("%s can not be found in backing chain file" % file_name)
        else:
            if file_name in qemu_img_info_backing_chain:
                test.fail("%s should not be in backing chain file" % file_name)

    def fill_vm_with_contents():
        """ Fill contents in VM """
        logging.info("Filling VM contents...")
        try:
            session = vm.wait_for_login()
            status, output = session.cmd_status_output(
                "dd if=/dev/urandom of=/tmp/bigfile bs=1M count=200")
            logging.info("Fill contents in VM:\n%s", output)
            session.close()
        except Exception as e:
            logging.error(str(e))

    def create_lvm_pool():
        """ create lvm pool"""
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
        pvt.pre_pool(**params)
        capacity = "5G"
        for i in range(1, 5):
            vol_name = 'vol%s' % i
            path = "%s/%s" % (pool_target, vol_name)
            virsh.vol_create_as(vol_name, pool_name, capacity, capacity, "qcow2", debug=True)
            cmd = "qemu-img create -f %s %s %s" % ("qcow2", path, capacity)
            process.run(cmd, ignore_status=False, shell=True)
            volume_path_list.append(path)
            capacity = "2G"

    def setup_iscsi_env():
        """ Setup iscsi environment"""
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("image_size", "10G")
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               portal_ip="127.0.0.1")
        cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
               % ("127.0.0.1", "3260", iscsi_target, lun_num, emulated_size))
        process.run(cmd, shell=True)

        blk_source_image_after_converted = "iscsi://%s:%s/%s/%s" % ("127.0.0.1", "3260", iscsi_target, lun_num)
        # Convert the image from qcow2 to raw
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)

        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_rbd_env():
        """ Set up rbd environment"""
        params.update(
            {"virt_disk_device_target": disk_target,
             "ceph_image_file": first_src_file})
        libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
        ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
        ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
        blk_source_image_after_converted = ("rbd:%s:mon_host=%s" %
                                            (ceph_disk_name, ceph_mon_ip))
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_volume_pool_env():
        """Setup volume pool environment"""
        params.update(
            {"virt_disk_device_target": disk_target})
        create_lvm_pool()

        blk_source_image_after_converted = ("%s" % volume_path_list[0])
        # Convert the image from qcow2 to volume
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)
        params.update({'disk_source_name': blk_source_image_after_converted,
                       'disk_type': 'block',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params, tmp_dir)
        vm.wait_for_login().close()
        vm.destroy(gracefully=False)
        replace_disk_image, backing_chain_list = libvirt_disk.make_syslink_path_backing_files(
            pre_set_root_dir, volume_path_list, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        blk_source_image_after_converted = os.path.join(pre_set_root_dir, syslink_top_img)
        skip_first_one = True
        return replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list

    def validate_blockcommit_after_libvirtd_restart():
        """Validate blockcommit after libvirtd restart"""
        logging.debug("phase three blockcommit .....")
        counts = 1
        phase_three_blockcommit_options = " --active"
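        # With --active and no pivot the commit stays in its mirroring phase,
        # so a pending block job (and the disk's <mirror> element) should
        # survive the libvirtd restart checked below.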
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_three_blockcommit_options, counts)
        time.sleep(3)
        # Before restart libvirtd
        mirror_content_before_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_content_before_restart)
        utils_libvirtd.libvirtd_restart()
        # After restart libvirtd
        mirror_content_after_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_content_after_restart)
        # Check whether mirror content is identical with previous one
        if mirror_content_before_restart != mirror_content_after_restart:
            test.fail("The mirror part content changed after libvirtd restarted")
        virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)

    def prepare_case_scenarios(snap_del_disks, base_file):
        """
        Prepare case scenarios

        :param snap_del_disks: snapshot list
        :param base_file: base file for snapshot
        """
        index = len(snap_del_disks) - 1
        option = "--top %s --base %s --delete --verbose --wait"
        scenarios = {}
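        # Each scenario commits a different slice of the snapshot chain, e.g.
        # 'middle-to-base' commits an intermediate snapshot into the original
        # base image, while 'abort-top-job' throttles bandwidth so the active
        # commit job can be aborted mid-flight.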
        scenarios.update({"middle-to-middle": {'blkcomopt':
                          option % (snap_del_disks[index - 1], snap_del_disks[index - 2]),
                          'top': snap_del_disks[index - 1],
                          'base': snap_del_disks[index - 2]}})
        scenarios.update({"middle-to-base": {'blkcomopt':
                          option % (snap_del_disks[index - 1], base_file),
                          'top': snap_del_disks[index - 1],
                          'base': base_file}})
        scenarios.update({"top-to-middle": {'blkcomopt':
                          option % (snap_del_disks[index], snap_del_disks[index - 2]) + "  --active",
                          'top': snap_del_disks[index],
                          'base': snap_del_disks[index - 2]}})
        scenarios.update({"top-to-base": {'blkcomopt':
                                          "--top %s --delete --verbose --wait --active --pivot"
                                          % (snap_del_disks[index]),
                                          "top": snap_del_disks[index],
                                          "base": snap_del_disks[index]}})
        scenarios.update({"abort-top-job": {'blkcomopt':
                                            "--top %s --delete --verbose --wait --active --pivot --bandwidth 1"
                                            % (snap_del_disks[index]),
                                            "top": snap_del_disks[index],
                                            "base": snap_del_disks[index]}})
        return scenarios

    def loop_case_in_scenarios(scenarios):
        """
        Loop case scenarios

        :param scenarios: scenario list
        """
        # loop each scenario
        for case, opt in list(scenarios.items()):
            logging.debug("Begin scenario: %s testing....................", case)
            reverse = False
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            tmp_option = opt.get('blkcomopt')
            top_file = opt.get('top')
            base_file = opt.get('base')
            if 'abort' in case:
                fill_vm_with_contents()
                ignite_blockcommit_thread = threading.Thread(target=virsh.blockcommit,
                                                             args=(vm_name, disk_target, tmp_option,),
                                                             kwargs={'ignore_status': True, 'debug': True})
                ignite_blockcommit_thread.start()
                ignite_blockcommit_thread.join(2)
                virsh.blockjob(vm_name, disk_target, " --abort", ignore_status=False)
                reverse = True
            else:
                libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', tmp_option, 1)
            # Need pivot to make effect
            if "--active" in tmp_option and "--pivot" not in tmp_option:
                virsh.blockjob(vm_name, disk_target, '--pivot', ignore_status=True)
            check_file_not_exists(pre_set_root_dir, top_file, reverse=reverse)
            if 'top' not in case:
                check_backing_chain_file_not_exists(snap_del_disks[len(snap_del_disks) - 1], top_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")

    virsh_dargs = {'debug': True}
    status_error = ("yes" == params.get("status_error", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    validate_delete_option = ("yes" == params.get("validate_delete_option", "no"))

    tmp_dir = data_dir.get_data_dir()
    top_inactive = ("yes" == params.get("top_inactive"))
    base_option = params.get("base_option", "none")
    bandwidth = params.get("blockcommit_bandwidth", "")

    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")

    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    pool_type = params.get("pool_type")
    emulated_image = params.get("emulated_image")
    syslink_top_img = params.get("syslink_top_img")
    snapshot_take = int(params.get("snapshot_take", "4"))
    snapshot_prefix = params.get("snapshot_prefix", "snapshot")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    blk_source_image = os.path.basename(first_src_file)
    pre_set_root_dir = os.path.dirname(first_src_file)

    snapshot_external_disks = []
    skip_first_one = False
    snap_del_disks = []
    volume_path_list = []
    kkwargs = params.copy()
    pvt = libvirt.PoolVolumeTest(test, params)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)
    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if disk_src_protocol == 'iscsi':
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_iscsi_env()
        if disk_src_protocol == "rbd":
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_rbd_env()
        if disk_src_protocol == "pool":
            replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list = setup_volume_pool_env()
        libvirt.set_vm_disk(vm, params, tmp_dir)

        # get a vm session before snapshot
        session = vm.wait_for_login()
        old_parts = utils_disk.get_parts_list(session)
        # Check backing files
        check_chain_backing_files(replace_disk_image, backing_chain_list)

        if vm_state == "paused":
            vm.pause()
        # Do phase one blockcommit
        phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
        counts = len(backing_chain_list)
        if bandwidth and base_option == "base":
            phase_one_blockcommit_options = "--top vda[1] --base vda[3] --keep-relative --bandwidth %s --active" % bandwidth
        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()
        if base_option == "shallow":
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        elif base_option == "base":
            counts = 1
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
            check_blockcommit_with_bandwidth(backing_chain_list[::-1])
            virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)
            # Pivot commits to bottom one of backing chain
            phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
            counts = len(backing_chain_list)
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        #Check top image after phase one block commit
        check_top_image_in_xml(blk_source_image_after_converted)

        # Do snapshots
        _, snapshot_external_disks = libvirt_disk.create_reuse_external_snapshots(
            vm, pre_set_root_dir, skip_first_one, disk_target)
        # Set blockcommit_options
        phase_two_blockcommit_options = "--verbose --keep-relative --shallow --active --pivot"

        # Run phase two blockcommit with snapshots
        counts = len(snapshot_external_disks) - 1
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_two_blockcommit_options, counts)
        #Check top image after phase two block commit
        check_top_image_in_xml(snapshot_external_disks)
        # Run dependent restart_libvirtd case
        if restart_libvirtd:
            validate_blockcommit_after_libvirtd_restart()
        # Run dependent validate_delete_option case
        if validate_delete_option:
            # Run blockcommit with snapshots to validate delete option
            # Test scenarios can be referred from https://bugzilla.redhat.com/show_bug.cgi?id=1008350
            logging.debug("Blockcommit with delete option .....")
            base_file = first_src_file
            # Get first attempt snapshot lists
            if vm.is_alive():
                vm.destroy(gracefully=False)
                # Reset VM to initial state
                vmxml_backup.sync("--snapshots-metadata")
                vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            scenarios = prepare_case_scenarios(snap_del_disks, base_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]
            loop_case_in_scenarios(scenarios)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Delete reuse external disk if exists
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Delete snapshot disk
        libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
        # Clean up created folders
        for folder in [chr(letter) for letter in range(ord('a'), ord('a') + 4)]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)

        # Remove ceph config file if created
        if disk_src_protocol == "rbd":
            libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)
        elif disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'pool':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
            rm_cmd = "rm -rf %s" % pool_target
            process.run(rm_cmd, shell=True)

        # Recover images xattr if having some
        dirty_images = libvirt_disk.get_images_with_xattr(vm)
        if dirty_images:
            libvirt_disk.clean_images_with_xattr(dirty_images)
            test.error("VM's image(s) having xattr left")
Example #4
def run(test, params, env):
    """
    Test virsh blockpull with various options on a VM.

    1.Prepare backend storage (iscsi, nbd, file, block)
    2.Start VM
    3.Execute virsh blockpull target command
    4.Check status after operation accomplished
    5.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_iscsi_block_env(params):
        """
        Setup iscsi as block test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("emulated_size", "10G")
        chap_user = params.get("iscsi_user")
        chap_passwd = params.get("iscsi_password")
        auth_sec_usage_type = params.get("secret_usage_type")
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            chap_passwd.encode(encoding)).decode(encoding)

        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")

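        # Store the CHAP password in a libvirt secret; the disk auth element
        # below references it by UUID.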
        auth_sec_uuid = libvirt_ceph_utils._create_secret(
            auth_sec_usage_type, secret_string)
        disk_auth_dict = {
            "auth_user": chap_user,
            "secret_type": auth_sec_usage_type,
            "secret_uuid": auth_sec_uuid
        }

        disk_src_dict = {'attrs': {'dev': device_source}}
        iscsi_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, disk_auth_dict)
        # Add disk xml.
        logging.debug("disk xml is:\n%s" % iscsi_disk)
        # Sync VM xml.
        vmxml.add_device(iscsi_disk)
        vmxml.sync()

    def setup_file_env(params):
        """
        Setup file test environment

        :param params: one dict to wrap up parameters
        """
        # If additional_disk is False, it means that there is no need to create additional disk
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        backstore_image_target_path = params.get("backstore_image_name")
        tmp_blkpull_path.append(backstore_image_target_path)
        libvirt.create_local_disk("file", backstore_image_target_path, "1",
                                  "qcow2")
        backing_chain_list.append(backstore_image_target_path)

        disk_src_dict = {"attrs": {"file": backstore_image_target_path}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def _generate_backstore_attribute(params):
        """
        Create one disk with backingStore attribute by creating snapshot

        :param params: one dict to wrap up parameters
        """
        device_target = params.get("virt_disk_device_target")
        top_file_image_name = params.get("top_file_image_name")
        second_file_image_name = params.get("second_file_image_name")
        tmp_blkpull_path.append(top_file_image_name)
        tmp_blkpull_path.append(second_file_image_name)
        backing_chain_list.append(top_file_image_name)
        if vm.is_dead():
            vm.start()
        snapshot_tmp_name = "blockpull_tmp_snap"
        options = " %s --disk-only --diskspec %s,file=%s" % (
            snapshot_tmp_name, 'vda', second_file_image_name)
        options += " --diskspec %s,file=%s" % (device_target,
                                               top_file_image_name)
        virsh.snapshot_create_as(vm_name,
                                 options,
                                 ignore_status=False,
                                 debug=True)
        vm.destroy()
        virsh.snapshot_delete(vm_name,
                              snapshot_tmp_name,
                              "--metadata",
                              ignore_status=False,
                              debug=True)
        vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("backstore prepare readiness :\n%s", vmxml_dir)

    def setup_block_env(params):
        """
        Setup block test environment

        :param params: one dict to wrap up parameters
        """
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
        disk_src_dict = {'attrs': {'dev': device_source}}
        backing_chain_list.append(device_source)

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def setup_nbd_env(params):
        """
        Setup nbd test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port", "10001")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        enable_ga_agent = "yes" == params.get("enable_ga_agent", "no")

        # Create NbdExport object
        nbd = NbdExport(image_path,
                        image_format=device_format,
                        port=nbd_server_port)
        nbd.start_nbd_server()

        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "no"}

        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})

        network_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)

        logging.debug("disk xml is:\n%s" % network_disk)
        # Sync VM xml.
        vmxml.add_device(network_disk)
        vmxml.sync()
        if enable_ga_agent:
            vm.prepare_guest_agent()
            vm.destroy(gracefully=False)

    def check_chain_backing_files(chain_list, disk_target):
        """
        Check chain backing files

        :param chain_list: list, expected backing chain list
        :param disk_target: disk target
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n",
                      disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
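        # The last entry is assumed to be the empty chain terminator
        # (<backingStore/>), so drop it before comparing sources.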
        backingstore_list.pop()
        parse_source_file_list = [
            elem.find('source').get('file') or elem.find('source').get('name')
            or elem.find('source').get('dev') for elem in backingstore_list
        ]
        logging.debug("before expected backing chain list is %s", chain_list)
        chain_list = chain_list[0:3]
        if backend_storage_type == "nbd":
            chain_list = chain_list[0:1]
        chain_list = chain_list[::-1]
        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether two are equals
        if blockpull_option in ['keep_relative']:
            if parse_source_file_list[-1] != chain_list[-1]:
                test.fail(
                    "The last element of the parsed backing chain does not match the expected one"
                )
        elif parse_source_file_list != chain_list:
            test.fail("The parsed backing chain does not match the expected one")

    def _extend_blkpull_execution(base=None,
                                  status_error=False,
                                  err_msg=None,
                                  expected_msg=None):
        """
        Wrap up blockpull execution combining with various options

        :params base: specific base
        :params status_error: expected error or not
        :params err_msg: error message if blockpull command fail
        :params expected_msg: jobinfo expected message if checked
        """
        blockpull_options = params.get("options")
        if '--base' in blockpull_options:
            if base:
                blockpull_options = params.get("options") % base
            else:
                blockpull_options = params.get(
                    "options") % external_snapshot_disks[0]
        result = virsh.blockpull(vm_name,
                                 device_target,
                                 blockpull_options,
                                 ignore_status=True,
                                 debug=True)
        libvirt.check_exit_status(result, expect_error=status_error)
        if status_error:
            if err_msg not in result.stdout_text and err_msg not in result.stderr_text:
                test.fail(
                    "Cannot find the expected failure message in stdout: %s or stderr: %s"
                    % (result.stdout_text, result.stderr_text))
        res = virsh.blockjob(vm_name, device_target, "--info").stdout.strip()
        logging.debug("virsh jobinfo is :%s\n", res)
        if expected_msg:
            job_msg = expected_msg
        else:
            job_msg = "No current block job for %s" % device_target
        if res and job_msg not in res:
            test.fail("Find unexpected block job information in %s" % res)

    def start_async_blkpull_on_vm():
        """Start blockpull with async"""
        context_msg = "Pull aborted"
        _extend_blkpull_execution(None, True, context_msg)

    def start_bandwidth_blkpull_on_vm():
        """Start blockpull with bandwidth option """
        _extend_blkpull_execution()

    def start_timeout_blkpull_on_vm():
        """Start blockpull with timeout option """
        _extend_blkpull_execution()

    def start_middle_to_top_to_base_on_vm():
        """Start blockpull from middle to top """
        _extend_blkpull_execution()
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_reuse_external_blkpull_on_vm():
        """Start blockpull with reuse_external """
        _extend_blkpull_execution(base=backing_chain_list[1])
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_top_as_base_blkpull_on_vm():
        """Start blockpull with top as base """
        error_msg = "error: invalid argument"
        _extend_blkpull_execution(base=backing_chain_list[-1],
                                  status_error=True,
                                  err_msg=error_msg)

    def start_base_to_top_blkpull_on_vm():
        """Start blockpull with base as top """
        _extend_blkpull_execution()

    def start_middletotop_blkpull_on_vm():
        """start middletotop blockpull on vm """
        _extend_blkpull_execution()
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))

    # Disk specific attributes.
    type_name = params.get("virt_disk_device_type")
    disk_device = params.get("virt_disk_device")
    device_target = params.get("virt_disk_device_target")
    device_bus = params.get("virt_disk_device_bus")
    device_format = params.get("virt_disk_device_format")

    blockpull_option = params.get("blockpull_option")
    options_value = params.get("options_value")
    backend_storage_type = params.get("backend_storage_type")
    backend_path = params.get("backend_path")
    additional_disk = "yes" == params.get("additional_disk", "yes")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))
    fill_in_vm = "yes" == params.get("fill_in_vm", "no")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    pre_set_root_dir = os.path.dirname(first_src_file)
    replace_disk_image = None

    # Additional disk images.
    tmp_dir = data_dir.get_data_dir()
    tmp_blkpull_path = []
    disks_img = []
    external_snapshot_disks = []
    attach_disk_xml = None
    backing_chain_list = []

    # Initialize one NbdExport object
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        utils_secret.clean_up_secrets()

        # Setup backend storage
        if backend_storage_type == "iscsi":
            setup_iscsi_block_env(params)
        elif backend_storage_type == "file":
            setup_file_env(params)
        elif backend_storage_type == "block":
            setup_block_env(params)
        elif backend_storage_type == "nbd":
            setup_nbd_env(params)
        try:
            vm.start()
            vm.wait_for_login().close()
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if fill_in_vm:
            libvirt_disk.fill_null_in_vm(vm, device_target)

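        # 'native_path' builds the chain from freshly created external
        # snapshots, while 'reuse_external' rebuilds it from relative-path
        # backing files before the blockpull is issued.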
        if backend_path in ['native_path']:
            external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
                vm, device_target, "blockpull_snapshot", snapshot_take)
            backing_chain_list.extend(external_snapshot_disks)
        elif backend_path in ['reuse_external']:
            replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
                vm, pre_set_root_dir, first_src_file, device_format)
            params.update({
                'disk_source_name': replace_disk_image,
                'disk_type': 'file',
                'disk_format': 'qcow2',
                'disk_source_protocol': 'file'
            })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if blockpull_option in ['middle_to_top']:
            start_middletotop_blkpull_on_vm()

        if blockpull_option in ['async']:
            start_async_blkpull_on_vm()

        if blockpull_option in ['bandwidth']:
            start_bandwidth_blkpull_on_vm()

        if blockpull_option in ['timeout']:
            start_timeout_blkpull_on_vm()

        if blockpull_option in ['middle_to_top_to_base']:
            start_middle_to_top_to_base_on_vm()

        if blockpull_option in ['keep_relative']:
            start_reuse_external_blkpull_on_vm()

        if blockpull_option in ['top_as_base']:
            start_top_as_base_blkpull_on_vm()

        if blockpull_option in ['base_to_top']:
            start_base_to_top_blkpull_on_vm()
    finally:
        # Recover VM.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        utils_secret.clean_up_secrets()
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Delete reuse external disk if exists
        for disk in external_snapshot_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up backend storage
        for tmp_path in tmp_blkpull_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if nbd:
            nbd.cleanup()
        # Clean up created folders
        for folder in [
                chr(letter) for letter in range(ord('a'),
                                                ord('a') + 4)
        ]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)
Example #5
def run(test, params, env):
    """
    1) Test Define/undefine/start/destroy/save/restore an OVMF/Seabios domain
    with 'boot dev' element or 'boot order' element
    2) Test Create snapshot with 'boot dev' or 'boot order'

    Steps:
    1) Prepare a typical VM XML, e.g. for OVMF or Seabios Guest boot
    2) Setup boot sequence by element 'boot dev' or 'boot order'
    3) Define/undefine/start/destroy/save/restore VM and check result
    4) Create snapshot with 'boot dev' or 'boot order'
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    boot_type = params.get("boot_type", "seabios")
    boot_ref = params.get("boot_ref", "dev")
    disk_target_dev = params.get("disk_target_dev", "")
    disk_target_bus = params.get("disk_target_bus", "")
    tmp_file = data_dir.get_data_dir()
    save_file = os.path.join(tmp_file, vm_name + ".save")
    nvram_file = params.get("nvram", "")
    expected_text = params.get("expected_text", None)
    boot_entry = params.get("boot_entry", None)
    with_snapshot = "yes" == params.get("with_snapshot", "no")
    snapshot_take = int(params.get("snapshot_take", "1"))
    postfix = params.get("postfix", "")
    external_snapshot = []

    # Back up the VM XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if boot_type == "ovmf":
        if not libvirt_version.version_compare(2, 0, 0):
            test.error("OVMF is not supported by the current libvirt version.")

        if not utils_package.package_install('OVMF'):
            test.error("OVMF package install failed")

    if (boot_type == "seabios"
            and not utils_package.package_install('seabios-bin')):
        test.error("seabios package install failed")

    try:
        prepare_boot_xml(vmxml, params)

        # Update domain disk
        # Use sata for uefi, and ide for seabios
        domain_disk = vmxml.get_devices(device_type='disk')[0]
        domain_disk.target = {"dev": disk_target_dev, "bus": disk_target_bus}
        del domain_disk['address']
        vmxml.remove_all_disk()
        vmxml.add_device(domain_disk)

        # Setup boot start sequence
        vmxml.remove_all_boots()
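        # 'order' sets a per-device <boot order='1'/> element, while 'dev'
        # sets <boot dev='hd'/> under <os>.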
        if boot_ref == "order":
            vmxml.set_boot_order_by_target_dev(disk_target_dev, "1")
        if boot_ref == "dev":
            vmxml.set_os_attrs(**{"boots": ["hd"]})

        logging.debug("The new VM XML is:\n%s", vmxml)
        vmxml.undefine()

        virsh_dargs = {"debug": True, "ignore_status": True}

        if boot_type == "s390_qemu":
            # Start test and check result
            ret = virsh.define(vmxml.xml, **virsh_dargs)
            ret = virsh.start(vm_name, "--paused", **virsh_dargs)
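            # Start paused so the serial console can be attached before the
            # guest reaches its boot menu.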
            time.sleep(1)
            vm.create_serial_console()
            time.sleep(1)
            vm.resume()
            if not boot_entry:
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("No boot menu found. Please check log.")
            else:
                vm.serial_console.send(boot_entry)
                time.sleep(0.5)
                vm.serial_console.sendcontrol('m')
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("Boot entry not selected. Please check log.")
                vm.wait_for_login()
        else:
            if with_snapshot:
                # Create snapshot for guest with boot dev or boot order
                virsh.define(vmxml.xml, **virsh_dargs)
                virsh.start(vm_name, **virsh_dargs)
                vm.wait_for_login()
                external_snapshot = libvirt_disk.make_external_disk_snapshots(
                    vm, disk_target_dev, postfix, snapshot_take)
                snapshot_list_check(vm_name, snapshot_take, postfix, test)
            else:
                # Test the lifecycle for the guest
                kwargs = {
                    "vm_name": vm_name,
                    "save_file": save_file,
                    "boot_type": boot_type,
                    "nvram_file": nvram_file
                }
                domain_lifecycle(vmxml, vm, test, virsh_dargs, **kwargs)
    finally:
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        if os.path.exists(save_file):
            os.remove(save_file)
        if with_snapshot:
            vmxml_recover_from_snap(vm_name, boot_type, vmxml_backup, test)
            # Remove the generated snapshot file
            for snap_file in external_snapshot:
                if os.path.exists(snap_file):
                    os.remove(snap_file)
        else:
            vmxml_backup.sync()