Example #1
0
    def test_blockcopy_synchronous_writes():
        """
        Test blockcopy with --synchronous-writes option.

        1) Run blockcopy and confirm it enters the mirroring phase.
        2) Verify the live domain xml carries a <mirror> element whose
           'file' attribute and <source> child both point at the copy.
        3) Write data in the guest, pivot the job, and verify the disk
           source switched to the copy target.
        4) Write data in the guest again after the pivot.
        """
        ret = virsh.blockcopy(vm_name,
                              device,
                              tmp_copy_path,
                              options=blockcopy_options,
                              ignore_status=False,
                              debug=True)
        if not ret.stdout_text.count("Now in mirroring phase"):
            test.fail("Not in mirroring phase")
        test_obj.new_image_path = tmp_copy_path
        # Check exist mirror tag after blockcopy.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        disk_list = vmxml.get_disk_all()[device]
        # BUGFIX: compare against None explicitly. An ElementTree Element
        # with no children is falsy, so "if not find('mirror')" could
        # misreport an existing (but empty) <mirror> element as missing.
        mirror = disk_list.find('mirror')
        if mirror is None:
            test.fail('No mirror tag in current domain xml :%s' % vmxml)
        else:
            # Check file in mirror should be new copy file
            mirror_file = mirror.get('file')
            if mirror_file != tmp_copy_path:
                test.fail('Current mirror tag file:%s is not same as:%s' %
                          (mirror_file, tmp_copy_path))
            # Check file in mirror >source > file should be new copy file
            mirror_source_file = mirror.find('source').get('file')
            if mirror_source_file != tmp_copy_path:
                test.fail('Current source tag file:%s is not same as:%s' %
                          (mirror_source_file, tmp_copy_path))

        # Check domain write file
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()
        # Abort job and check disk source changed.
        virsh.blockjob(vm_name,
                       device,
                       options=' --pivot',
                       debug=True,
                       ignore_status=False)
        current_source = libvirt_disk.get_first_disk_source(vm)
        if current_source != tmp_copy_path:
            test.fail("Current source: %s is not same as original blockcopy"
                      " path:%s" % (current_source, tmp_copy_path))
        # Check domain write file after
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()
def run(test, params, env):
    """
    Test scenarios: virsh blockcommit with relative path

    1) Prepare test environment.
    2) Create relative path backing chain
    3) Do virsh blockcommit
    4) Check result.
    5) Recover the environments

    :param test: avocado test object (provides fail/error/cancel)
    :param params: dict of cartesian test parameters
    :param env: test environment object holding the VM under test
    """
    def check_chain_backing_files(disk_src_file, expect_backing_list):
        """
        Check backing chain files of relative path after blockcommit.

        Fails the test when the qemu-img reported chain differs from the
        expected one.

        :param disk_src_file: first disk src file.
        :param expect_backing_list: backing chain lists.
        """
        # qemu-img is the ground truth for what actually backs the image.
        actual_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        logging.debug("The actual qemu-img qemu_img_info_backing_chain:%s\n", actual_chain)
        logging.debug("The actual qemu-img expect_backing_list:%s\n", expect_backing_list)
        if actual_chain != expect_backing_list:
            test.fail("The backing files by qemu-img is not identical in expected backing list")

    def check_top_image_in_xml(expected_top_image):
        """
        Check that the disk's current top image appears in the expected set.

        :param expected_top_image: expect top image
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = None
        for disk in vmxml.devices.by_device_tag('disk'):
            if disk.target['dev'] == disk_target:
                disk_xml = disk.xmltreefile
                break
        logging.debug("disk xml in top: %s\n", disk_xml)
        # The source location attribute differs per disk type
        # (file/network/block); take the first one that is set.
        source_element = disk_xml.find('source')
        src_file = None
        for attr in ['file', 'name', 'dev']:
            src_file = source_element.get(attr)
            if src_file:
                break
        if src_file not in expected_top_image:
            test.fail("Current top img %s is not the same with expected: %s" % (src_file, expected_top_image))

    def check_blockcommit_with_bandwidth(chain_list):
        """
        Check blockcommit with bandwidth

        param chain_list: list, expected backing chain list
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = None
        for disk in vmxml.devices.by_device_tag('disk'):
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n", disk_xml.xmltreefile)
        # Each backingstore's source is identified by 'file' (local) or
        # 'name' (network) -- collect whichever is present.
        parse_source_file_list = []
        for store in disk_xml.get_backingstore_list():
            source = store.find('source')
            parse_source_file_list.append(source.get('file') or source.get('name'))

        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether relative path has been kept; the last chain entry
        # (the base) is deliberately not compared.
        for index, expected_path in enumerate(chain_list[:-1]):
            if expected_path not in parse_source_file_list[index]:
                test.fail("The relative path parsed from disk xml is different with pre-expected ones")

    def check_file_not_exists(root_dir, file_name, reverse=False):
        """
        Check whether file exists in certain folder

        :param root_dir: preset root directory
        :param file_name:  input file name
        :param reverse: whether reverse the condition
        """
        files_path = [os.path.join(root_dir, entry)
                      for entry in os.listdir(root_dir)
                      if os.path.isfile(os.path.join(root_dir, entry))]
        logging.debug("all files in folder: %s \n", files_path)
        if not files_path:
            test.fail("Failed to get snapshot files in preset folder")
        elif reverse and file_name not in files_path:
            # reverse=True: the file is expected to still exist.
            test.fail("snapshot file:%s can not be found" % file_name)
        elif not reverse and file_name in files_path:
            # default: the file is expected to have been deleted.
            test.fail("snapshot file:%s  can not be deleted" % file_name)

    def check_backing_chain_file_not_exists(disk_src_file, file_name, reverse=False):
        """
        Check whether file exists in source file's backing chain

        :param disk_src_file: disk source with backing chain files
        :param file_name: input file name
        :param reverse: whether reverse this condition
        """
        backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        file_present = file_name in backing_chain
        # reverse=True means the file is expected in the chain;
        # the default expects it to be absent.
        if reverse and not file_present:
            test.fail("%s can not be found in backing chain file" % file_name)
        if not reverse and file_present:
            test.fail("%s should not be in backing chain file" % file_name)

    def fill_vm_with_contents():
        """
        Fill the guest with data by writing a 200 MiB random file.

        Best-effort: failures are logged but never propagated, since
        callers only need the guest to be "dirty", not a guarantee.
        """
        logging.info("Filling VM contents...")
        try:
            session = vm.wait_for_login()
            status, output = session.cmd_status_output(
                "dd if=/dev/urandom of=/tmp/bigfile bs=1M count=200")
            if status:
                # BUGFIX: the exit status was previously ignored, so a
                # failed dd was logged as success.
                logging.error("Failed to fill contents in VM (rc=%s):\n%s",
                              status, output)
            else:
                logging.info("Fill contents in VM:\n%s", output)
            session.close()
        except Exception as e:
            logging.error(str(e))

    def create_lvm_pool():
        """
        Create an LVM pool plus four qcow2 volumes inside it.

        Volume paths are appended to the shared volume_path_list.
        """
        # Recreate the pool from scratch.
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
        pvt.pre_pool(**params)
        capacity = "5G"
        for index in range(1, 5):
            vol_name = 'vol%s' % index
            path = "%s/%s" % (pool_target, vol_name)
            virsh.vol_create_as(vol_name, pool_name, capacity, capacity,
                                "qcow2", debug=True)
            cmd = "qemu-img create -f %s %s %s" % ("qcow2", path, capacity)
            process.run(cmd, ignore_status=False, shell=True)
            volume_path_list.append(path)
            # Only the first volume is 5G; the rest are 2G.
            capacity = "2G"

    def setup_iscsi_env():
        """
        Setup iscsi environment.

        Creates a fresh iscsi target, creates a qcow2 image on the LUN,
        converts the VM's first disk image onto it, then builds a
        relative-path backing chain on top and updates the test params
        to use the new top image.

        :return: tuple of (replace_disk_image,
                 blk_source_image_after_converted, backing_chain_list)
        """
        # Tear down any stale target before creating a new one.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("image_size", "10G")
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               portal_ip="127.0.0.1")
        # Create a qcow2 image directly on the iscsi LUN.
        cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
               % ("127.0.0.1", "3260", iscsi_target, lun_num, emulated_size))
        process.run(cmd, shell=True)

        blk_source_image_after_converted = "iscsi://%s:%s/%s/%s" % ("127.0.0.1", "3260", iscsi_target, lun_num)
        # Convert the image from qcow2 to raw
        # NOTE(review): despite the comment above, the destination format is
        # disk_format (default qcow2), not raw -- confirm intent.
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)

        # Build relative-path backing files over the converted image.
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_rbd_env():
        """
        Set up rbd (ceph) environment.

        Creates the ceph-backed VM disk, then builds a relative-path
        backing chain on top of the rbd image and updates the test params
        to use the new top image.

        :return: tuple of (replace_disk_image,
                 blk_source_image_after_converted, backing_chain_list)
        """
        params.update(
            {"virt_disk_device_target": disk_target,
             "ceph_image_file": first_src_file})
        libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
        ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
        ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
        # qemu-style rbd URI used as the bottom of the backing chain.
        blk_source_image_after_converted = ("rbd:%s:mon_host=%s" %
                                            (ceph_disk_name, ceph_mon_ip))
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_volume_pool_env():
        """
        Setup volume pool environment.

        Creates an LVM pool with volumes, converts the VM's first disk
        image onto the first volume, boots once to validate the disk,
        then builds symlink-based backing files over the volumes.

        :return: tuple of (replace_disk_image,
                 blk_source_image_after_converted, skip_first_one,
                 backing_chain_list)
        """
        params.update(
            {"virt_disk_device_target": disk_target})
        create_lvm_pool()

        blk_source_image_after_converted = ("%s" % volume_path_list[0])
        # Convert the image from qcow2 to volume
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)
        params.update({'disk_source_name': blk_source_image_after_converted,
                       'disk_type': 'block',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params, tmp_dir)
        # Boot once to make sure the converted disk works, then shut down
        # before rewriting the disk definition to the symlink chain.
        vm.wait_for_login().close()
        vm.destroy(gracefully=False)
        replace_disk_image, backing_chain_list = libvirt_disk.make_syslink_path_backing_files(
            pre_set_root_dir, volume_path_list, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        blk_source_image_after_converted = os.path.join(pre_set_root_dir, syslink_top_img)
        # The first reuse-snapshot must be skipped for the symlink layout.
        skip_first_one = True
        return replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list

    def validate_blockcommit_after_libvirtd_restart():
        """
        Validate that an in-flight active blockcommit's <mirror> xml
        survives a libvirtd restart unchanged, then abort the job.
        """
        logging.debug("phase three blockcommit .....")
        phase_three_blockcommit_options = " --active"
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda',
                                               phase_three_blockcommit_options, 1)
        time.sleep(3)
        # Capture the <mirror> part of the disk xml before the restart.
        mirror_before = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_before)
        utils_libvirtd.libvirtd_restart()
        # Capture it again after the restart.
        mirror_after = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_after)
        # Check whether mirror content is identical with previous one.
        if mirror_before != mirror_after:
            test.fail("The mirror part content changed after libvirtd restarted")
        virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)

    def prepare_case_scenarios(snap_del_disks, base_file):
        """
        Prepare case scenarios.

        Builds a mapping of scenario name -> dict with keys 'blkcomopt'
        (virsh blockcommit options), 'top' and 'base' (image paths).

        :param snap_del_disks: snapshot list
        :param base_file: base file for snapshot
        """
        last = len(snap_del_disks) - 1
        option = "--top %s --base %s --delete --verbose --wait"
        # Shared option string for the two scenarios committing the top image.
        top_option = ("--top %s --delete --verbose --wait --active --pivot"
                      % (snap_del_disks[last]))
        return {
            "middle-to-middle": {
                'blkcomopt': option % (snap_del_disks[last - 1],
                                       snap_del_disks[last - 2]),
                'top': snap_del_disks[last - 1],
                'base': snap_del_disks[last - 2]},
            "middle-to-base": {
                'blkcomopt': option % (snap_del_disks[last - 1], base_file),
                'top': snap_del_disks[last - 1],
                'base': base_file},
            "top-to-middle": {
                'blkcomopt': option % (snap_del_disks[last],
                                       snap_del_disks[last - 2]) + "  --active",
                'top': snap_del_disks[last],
                'base': snap_del_disks[last - 2]},
            "top-to-base": {
                'blkcomopt': top_option,
                'top': snap_del_disks[last],
                'base': snap_del_disks[last]},
            # Bandwidth-throttled so the caller can abort the running job.
            "abort-top-job": {
                'blkcomopt': top_option + " --bandwidth 1",
                'top': snap_del_disks[last],
                'base': snap_del_disks[last]},
        }

    def loop_case_in_scenarios(scenarios):
        """
        Loop case scenarios.

        For each scenario: reset the VM to its original xml, create
        external snapshots, run the scenario's blockcommit, then verify
        the committed top file was deleted (or kept, for abort cases)
        both on disk and in the backing chain, and clean the snapshots.

        :param scenarios: scenario list
        """
        # loop each scenario
        for case, opt in list(scenarios.items()):
            logging.debug("Begin scenario: %s testing....................", case)
            reverse = False
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            tmp_option = opt.get('blkcomopt')
            top_file = opt.get('top')
            base_file = opt.get('base')
            if 'abort' in case:
                fill_vm_with_contents()
                # Run blockcommit in a background thread so it can be
                # aborted while still in flight (its bandwidth is 1).
                ignite_blockcommit_thread = threading.Thread(target=virsh.blockcommit,
                                                             args=(vm_name, disk_target, tmp_option,),
                                                             kwargs={'ignore_status': True, 'debug': True})
                ignite_blockcommit_thread.start()
                ignite_blockcommit_thread.join(2)
                virsh.blockjob(vm_name, disk_target, " --abort", ignore_status=False)
                # An aborted commit must NOT have deleted the top file.
                reverse = True
            else:
                libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', tmp_option, 1)
            # Need pivot to make effect
            if "--active" in tmp_option and "--pivot" not in tmp_option:
                virsh.blockjob(vm_name, disk_target, '--pivot', ignore_status=True)
            check_file_not_exists(pre_set_root_dir, top_file, reverse=reverse)
            if 'top' not in case:
                check_backing_chain_file_not_exists(snap_del_disks[len(snap_del_disks) - 1], top_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]

    # ---- Parse cartesian parameters -------------------------------------
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")

    virsh_dargs = {'debug': True}
    status_error = ("yes" == params.get("status_error", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    validate_delete_option = ("yes" == params.get("validate_delete_option", "no"))

    tmp_dir = data_dir.get_data_dir()
    top_inactive = ("yes" == params.get("top_inactive"))
    base_option = params.get("base_option", "none")
    bandwidth = params.get("blockcommit_bandwidth", "")

    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")

    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    pool_type = params.get("pool_type")
    emulated_image = params.get("emulated_image")
    syslink_top_img = params.get("syslink_top_img")
    snapshot_take = int(params.get("snapshot_take", "4"))
    snapshot_prefix = params.get("snapshot_prefix", "snapshot")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    blk_source_image = os.path.basename(first_src_file)
    pre_set_root_dir = os.path.dirname(first_src_file)

    snapshot_external_disks = []
    skip_first_one = False
    snap_del_disks = []
    volume_path_list = []
    kkwargs = params.copy()
    pvt = libvirt.PoolVolumeTest(test, params)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    # (BUGFIX: variable was misspelled "exsiting_snaps")
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)
    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Build the protocol-specific backing-chain environment.
        if disk_src_protocol == 'iscsi':
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_iscsi_env()
        if disk_src_protocol == "rbd":
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_rbd_env()
        if disk_src_protocol == "pool":
            replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list = setup_volume_pool_env()
        libvirt.set_vm_disk(vm, params, tmp_dir)

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # NOTE(review): old_parts is never read afterwards -- candidate
        # for removal; kept to avoid dropping a possibly intended probe.
        old_parts = utils_disk.get_parts_list(session)
        # Check backing files
        check_chain_backing_files(replace_disk_image, backing_chain_list)

        if vm_state == "paused":
            vm.pause()
        # Do phase one blockcommit
        phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
        counts = len(backing_chain_list)
        if bandwidth and base_option == "base":
            phase_one_blockcommit_options = "--top vda[1] --base vda[3] --keep-relative --bandwidth %s --active" % bandwidth
        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()
        if base_option == "shallow":
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        elif base_option == "base":
            counts = 1
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
            check_blockcommit_with_bandwidth(backing_chain_list[::-1])
            virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)
            # Pivot commits to bottom one of backing chain
            phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
            counts = len(backing_chain_list)
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        # Check top image after phase one block commit
        check_top_image_in_xml(blk_source_image_after_converted)

        # Do snapshots
        _, snapshot_external_disks = libvirt_disk.create_reuse_external_snapshots(
            vm, pre_set_root_dir, skip_first_one, disk_target)
        # Set blockcommit_options
        phase_two_blockcommit_options = "--verbose --keep-relative --shallow --active --pivot"

        # Run phase two blockcommit with snapshots
        counts = len(snapshot_external_disks) - 1
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_two_blockcommit_options, counts)
        # Check top image after phase two block commit
        check_top_image_in_xml(snapshot_external_disks)
        # Run dependent restart_libvirtd case
        if restart_libvirtd:
            validate_blockcommit_after_libvirtd_restart()
        # Run dependent validate_delete_option case
        if validate_delete_option:
            # Run blockcommit with snapshots to validate delete option
            # Test scenarios can be referred from https://bugzilla.redhat.com/show_bug.cgi?id=1008350
            logging.debug("Blockcommit with delete option .....")
            base_file = first_src_file
            # Get first attempt snapshot lists
            if vm.is_alive():
                vm.destroy(gracefully=False)
                # Reset VM to initial state
                vmxml_backup.sync("--snapshots-metadata")
                vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            scenarios = prepare_case_scenarios(snap_del_disks, base_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]
            loop_case_in_scenarios(scenarios)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Delete reuse external disk if exists
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Delete snapshot disk
        libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
        # Clean up created folders ('a' through 'd') used by the
        # relative-path backing chains.
        for folder in [chr(letter) for letter in range(ord('a'), ord('a') + 4)]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)

        # Remove ceph config file if created
        if disk_src_protocol == "rbd":
            libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)
        elif disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            # NOTE(review): 'iscsi_target' is local to setup_iscsi_env(), so
            # the locals() check here can never be true -- only the protocol
            # comparison actually triggers this cleanup. Confirm and simplify.
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'pool':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
            rm_cmd = "rm -rf %s" % pool_target
            process.run(rm_cmd, shell=True)

        # Recover images xattr if having some
        dirty_images = libvirt_disk.get_images_with_xattr(vm)
        if dirty_images:
            libvirt_disk.clean_images_with_xattr(dirty_images)
            test.error("VM's image(s) having xattr left")
Example #3
0
def run(test, params, env):
    """
    Test blockjob operation

    :param test: avocado test object (provides fail/error)
    :param params: dict of cartesian test parameters; 'case_name'
        selects which test_/setup_/teardown_ trio to run
    :param env: test environment object holding the VM under test
    """
    def setup_blockjob_raw():
        """
        Prepare running domain and do blockcopy
        """
        if not vm.is_alive():
            vm.start()

        # Remove any leftover copy target from a previous run.
        if os.path.exists(tmp_copy_path):
            process.run('rm -rf %s' % tmp_copy_path)

        # Throttle the copy (--bandwidth 1000) so the job is still running
        # when the test queries it; run it in a detached virsh session.
        blockcopy_cmd = ("blockcopy %s %s %s --wait --verbose --transient-job "
                         "--bandwidth 1000 " % (vm_name, dev, tmp_copy_path))
        virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                           auto_close=True)
        virsh_session.sendline(blockcopy_cmd)

    def test_blockjob_raw():
        """
        Do blockjob with raw option.

        1) Prepare a running guest.
        2) Do blockcopy.
        3) Do blockjob with raw option twice and check cur value is changing
        4) Execute with --pivot and check raw option return empty
        """
        options = params.get('option_value', '')
        # Confirm blockcopy not finished.
        if not libvirt.check_blockjob(vm_name, dev, "progress", "100"):
            res_1 = virsh.blockjob(vm_name,
                                   dev,
                                   options=options,
                                   debug=True,
                                   ignore_status=False)
            cur1 = libvirt_misc.convert_to_dict(res_1.stdout_text.strip(),
                                                pattern=r'(\S+)=(\S+)')['cur']
            time.sleep(1)
            res_2 = virsh.blockjob(vm_name,
                                   dev,
                                   options=options,
                                   debug=True,
                                   ignore_status=False)
            cur2 = libvirt_misc.convert_to_dict(res_2.stdout_text.strip(),
                                                pattern=r'(\S+)=(\S+)')['cur']
            LOG.debug('cur1 is %s , cur2 is %s', cur1, cur2)

            # 'cur' must advance between the two queries while the copy runs.
            if int(cur1) >= int(cur2):
                test.fail('Two times cur value is not changed according'
                          ' to the progress of blockcopy.')

            # Abort job and check abort success.
            if utils_misc.wait_for(
                    lambda: libvirt.check_blockjob(vm_name, dev, "progress",
                                                   "100"), 100):
                virsh.blockjob(vm_name,
                               dev,
                               options=' --pivot',
                               debug=True,
                               ignore_status=False)
            else:
                test.fail("Blockjob timeout in 100 sec.")
            # Check no output after abort.
            result = virsh.blockjob(vm_name,
                                    dev,
                                    options=options,
                                    debug=True,
                                    ignore_status=False)
            if result.stdout_text.strip():
                # BUGFIX: test.fail() takes a single message argument; the
                # original passed a logging-style (msg, args) pair, which
                # raised TypeError instead of reporting the failure.
                test.fail("Not return empty after pivot, but get:%s"
                          % result.stdout_text.strip())
        else:
            # BUGFIX: message previously contained accidental whitespace from
            # a line continuation inside the string literal and a typo.
            test.error('Blockcopy finished too fast, unable to check raw '
                       'result. Please consider to reduce bandwidth value '
                       'to retest this case.')

    def teardown_blockjob_raw():
        """
        After blockcopy, clean file
        """
        # Best-effort abort in case an earlier step broke and left the
        # copy job running (ignore_status=True: there may be no job).
        virsh.blockjob(vm_name, dev, options=' --abort',
                       debug=True, ignore_status=True)
        # Clean copy file
        process.run('rm -rf %s' % tmp_copy_path)

    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    case_name = params.get('case_name', 'blockjob')
    dev = params.get('disk')
    # Create object
    test_obj = blockcommand_base.BlockCommand(test, vm, params)
    check_obj = check_functions.Checkfunction(test, vm, params)

    # Back up the VM xml so it can be restored in the finally block.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    # The copy target lives next to the VM's first disk image.
    tmp_copy_path = os.path.join(
        os.path.dirname(libvirt_disk.get_first_disk_source(vm)),
        "%s_blockcopy.img" % vm_name)
    # MAIN TEST CODE ###
    # Resolve the setup/run/teardown trio by case name.
    # NOTE(review): eval() on a params-derived name is acceptable only
    # because case_name comes from the test's own cartesian config, never
    # from untrusted input.
    run_test = eval("test_%s" % case_name)
    setup_test = eval("setup_%s" % case_name)
    teardown_test = eval("teardown_%s" % case_name)

    try:
        # Execute test
        setup_test()
        run_test()

    finally:
        # Always abort leftover jobs, remove the copy file, and restore
        # the original VM xml.
        teardown_test()
        bkxml.sync()
Example #4
0
def run(test, params, env):
    """
    Test virsh blockpull with various option on VM.

    1.Prepare backend storage (iscsi,nbd,file,block)
    2.Start VM
    3.Execute virsh blockpull target command
    4.Check status after operation accomplished
    5.Clean up test environment

    :param test: avocado test object, used for fail/error reporting
    :param params: cartesian config parameters for this test variant
    :param env: test environment holding the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_iscsi_block_env(params):
        """
        Setup iscsi as block test environment

        Creates a CHAP-protected iscsi LUN on localhost, registers the
        CHAP password as a libvirt secret, and attaches the LUN to the VM
        as a block-type disk.

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Tear down any stale iscsi target left over from a previous run.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("emulated_size", "10G")
        chap_user = params.get("iscsi_user")
        chap_passwd = params.get("iscsi_password")
        auth_sec_usage_type = params.get("secret_usage_type")
        # libvirt secrets expect the CHAP password base64-encoded.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            chap_passwd.encode(encoding)).decode(encoding)

        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")

        auth_sec_uuid = libvirt_ceph_utils._create_secret(
            auth_sec_usage_type, secret_string)
        # Auth block wired into the disk xml so the guest can log in.
        disk_auth_dict = {
            "auth_user": chap_user,
            "secret_type": auth_sec_usage_type,
            "secret_uuid": auth_sec_uuid
        }

        disk_src_dict = {'attrs': {'dev': device_source}}
        iscsi_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, disk_auth_dict)
        # Add disk xml.
        logging.debug("disk xml is:\n%s" % iscsi_disk)
        # Sync VM xml.
        vmxml.add_device(iscsi_disk)
        vmxml.sync()

    def setup_file_env(params):
        """
        Prepare a file-backed test disk.

        Creates a 1G qcow2 image, attaches it to the VM, then builds a
        backingStore chain on it via snapshots.

        :param params: one dict to wrap up parameters
        """
        # Nothing to do when the variant runs against the OS disk only.
        if not additional_disk:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        backstore_path = params.get("backstore_image_name")
        # Record the path both for cleanup and as the expected chain bottom.
        tmp_blkpull_path.append(backstore_path)
        libvirt.create_local_disk("file", backstore_path, "1", "qcow2")
        backing_chain_list.append(backstore_path)

        src_dict = {"attrs": {"file": backstore_path}}
        new_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            src_dict, None)
        logging.debug("disk xml is:\n%s", new_disk)
        # Persist the new disk into the domain definition.
        vmxml.add_device(new_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def _generate_backstore_attribute(params):
        """
        Create one disk with backingStore attribute by creating snapshot

        Takes a disk-only snapshot covering both the OS disk and the test
        disk, then deletes only the snapshot metadata so the backing-file
        relationship remains in the image chain.

        :param params: one dict to wrap up parameters
        """
        device_target = params.get("virt_disk_device_target")
        top_file_image_name = params.get("top_file_image_name")
        second_file_image_name = params.get("second_file_image_name")
        # Track both snapshot overlays for cleanup.
        tmp_blkpull_path.append(top_file_image_name)
        tmp_blkpull_path.append(second_file_image_name)
        backing_chain_list.append(top_file_image_name)
        if vm.is_dead():
            vm.start()
        snapshot_tmp_name = "blockpull_tmp_snap"
        # NOTE(review): the first diskspec hardcodes 'vda' as the OS disk
        # target — presumably the guest's first disk; confirm for layouts
        # where the boot disk is not vda.
        options = " %s --disk-only --diskspec %s,file=%s" % (
            snapshot_tmp_name, 'vda', second_file_image_name)
        options += " --diskspec %s,file=%s" % (device_target,
                                               top_file_image_name)
        virsh.snapshot_create_as(vm_name,
                                 options,
                                 ignore_status=False,
                                 debug=True)
        vm.destroy()
        # Drop only the snapshot metadata; the overlay images (and thus
        # the backingStore attribute) stay in place.
        virsh.snapshot_delete(vm_name,
                              snapshot_tmp_name,
                              "--metadata",
                              ignore_status=False,
                              debug=True)
        vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("backstore prepare readiness :\n%s", vmxml_dir)

    def setup_block_env(params):
        """
        Prepare a block (iscsi LUN) backed test disk.

        :param params: one dict to wrap up parameters
        """
        # Skip when the variant does not want an extra disk.
        if not additional_disk:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Export a local iscsi LUN and use it as the block device source.
        lun_path = libvirt.setup_or_cleanup_iscsi(is_setup=True)
        src_dict = {'attrs': {'dev': lun_path}}
        backing_chain_list.append(lun_path)

        block_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            src_dict, None)
        logging.debug("disk xml is:\n%s", block_disk)
        # Persist the new disk into the domain definition.
        vmxml.add_device(block_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def setup_nbd_env(params):
        """
        Setup nbd test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port", "10001")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        enable_ga_agent = "yes" == params.get("enable_ga_agent", "no")

        # Create NbdExport object
        nbd = NbdExport(image_path,
                        image_format=device_format,
                        port=nbd_server_port)
        nbd.start_nbd_server()

        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % "no"}

        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})

        network_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)

        logging.debug("disk xml is:\n%s" % network_disk)
        # Sync VM xml.
        vmxml.add_device(network_disk)
        vmxml.sync()
        if enable_ga_agent:
            vm.prepare_guest_agent()
            vm.destroy(gracefully=False)

    def check_chain_backing_files(chain_list, disk_target):
        """
        Check chain backing files

        Compares the backingStore sources parsed from the live domain xml
        against the expected backing chain.

        :param chain_list: list, expected backing chain list
        :param disk_target: disk target
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n",
                      disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
        # Drop the last parsed element — presumably the chain terminator
        # without a <source>; TODO confirm against get_backingstore_list().
        backingstore_list.pop()
        # A <source> may be file-, name- or dev-based depending on disk type.
        parse_source_file_list = [
            elem.find('source').get('file') or elem.find('source').get('name')
            or elem.find('source').get('dev') for elem in backingstore_list
        ]
        logging.debug("before expected backing chain list is %s", chain_list)
        # Keep only the first three expected entries (one for nbd) and
        # reverse them: domain xml lists the chain top-down.
        chain_list = chain_list[0:3]
        if backend_storage_type == "nbd":
            chain_list = chain_list[0:1]
        chain_list = chain_list[::-1]
        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether two are equals
        if blockpull_option in ['keep_relative']:
            # Relative-path chains only compare the bottom element.
            if parse_source_file_list[-1] != chain_list[-1]:
                test.fail(
                    "checked backchain list in last element is not equals to expected one"
                )
        elif parse_source_file_list != chain_list:
            test.fail("checked backchain list is not equals to expected one")

    def _extend_blkpull_execution(base=None,
                                  status_error=False,
                                  err_msg=None,
                                  expected_msg=None):
        """
        Wrap up blockpull execution combining with various options

        :params base: specific base
        :params status_error: expected error or not
        :params err_msg: error message if blockpull command fail
        :params expected_msg: jobinfo expected message if checked
        """
        blockpull_options = params.get("options")
        # "--base %s"-style option strings are filled with the explicit
        # base, or by default with the first external snapshot image.
        if '--base' in blockpull_options:
            if base:
                blockpull_options = params.get("options") % base
            else:
                blockpull_options = params.get(
                    "options") % external_snapshot_disks[0]
        result = virsh.blockpull(vm_name,
                                 device_target,
                                 blockpull_options,
                                 ignore_status=True,
                                 debug=True)
        libvirt.check_exit_status(result, expect_error=status_error)
        if status_error:
            # The error text may land on stdout or stderr depending on virsh.
            if err_msg not in result.stdout_text and err_msg not in result.stderr_text:
                test.fail(
                    "Can not find failed message in standard output: %s or : %s"
                    % (result.stdout_text, result.stderr_text))
        res = virsh.blockjob(vm_name, device_target, "--info").stdout.strip()
        logging.debug("virsh jobinfo is :%s\n", res)
        if expected_msg:
            job_msg = expected_msg
        else:
            # After a completed pull no job should remain on the device.
            job_msg = "No current block job for %s" % device_target
        if res and job_msg not in res:
            test.fail("Find unexpected block job information in %s" % res)

    def start_async_blkpull_on_vm():
        """Run blockpull with --async and expect the 'Pull aborted' error."""
        _extend_blkpull_execution(base=None, status_error=True,
                                  err_msg="Pull aborted")

    def start_bandwidth_blkpull_on_vm():
        """Run a plain blockpull using the bandwidth-limited options."""
        _extend_blkpull_execution()

    def start_timeout_blkpull_on_vm():
        """Run a plain blockpull using the timeout options."""
        _extend_blkpull_execution()

    def start_middle_to_top_to_base_on_vm():
        """Pull up to a middle layer, abort, then pull again to the top."""
        _extend_blkpull_execution()
        virsh.blockjob(vm_name, device_target, '--abort',
                       ignore_status=True)
        # Re-run using the variant's top-level blockpull options.
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_reuse_external_blkpull_on_vm():
        """Pull on a relative-path chain and verify the remaining chain."""
        _extend_blkpull_execution(base=backing_chain_list[1])
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))
        virsh.blockjob(vm_name, device_target, '--abort',
                       ignore_status=True)
        # Re-run using the variant's top-level blockpull options.
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_top_as_base_blkpull_on_vm():
        """Pass the active (top) image as --base; libvirt must reject it."""
        _extend_blkpull_execution(base=backing_chain_list[-1],
                                  status_error=True,
                                  err_msg="error: invalid argument")

    def start_base_to_top_blkpull_on_vm():
        """Run a plain blockpull flattening the base into the top image."""
        _extend_blkpull_execution()

    def start_middletotop_blkpull_on_vm():
        """Pull the middle layers into the top and verify the chain."""
        _extend_blkpull_execution()
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))

    # Disk specific attributes.
    type_name = params.get("virt_disk_device_type")
    disk_device = params.get("virt_disk_device")
    device_target = params.get("virt_disk_device_target")
    device_bus = params.get("virt_disk_device_bus")
    device_format = params.get("virt_disk_device_format")

    blockpull_option = params.get("blockpull_option")
    options_value = params.get("options_value")
    backend_storage_type = params.get("backend_storage_type")
    backend_path = params.get("backend_path")
    additional_disk = "yes" == params.get("additional_disk", "yes")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))
    fill_in_vm = "yes" == params.get("fill_in_vm", "no")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    pre_set_root_dir = os.path.dirname(first_src_file)
    replace_disk_image = None

    # Additional disk images.
    tmp_dir = data_dir.get_data_dir()
    tmp_blkpull_path = []
    disks_img = []
    external_snapshot_disks = []
    attach_disk_xml = None
    backing_chain_list = []

    # Initialize one NbdExport object
    # (set by setup_nbd_env; checked in the finally block for cleanup).
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        utils_secret.clean_up_secrets()

        # Setup backend storage
        if backend_storage_type == "iscsi":
            setup_iscsi_block_env(params)
        elif backend_storage_type == "file":
            setup_file_env(params)
        elif backend_storage_type == "block":
            setup_block_env(params)
        elif backend_storage_type == "nbd":
            setup_nbd_env(params)
        try:
            vm.start()
            vm.wait_for_login().close()
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if fill_in_vm:
            libvirt_disk.fill_null_in_vm(vm, device_target)

        # Build the backing chain the blockpull will operate on.
        if backend_path in ['native_path']:
            external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
                vm, device_target, "blockpull_snapshot", snapshot_take)
            backing_chain_list.extend(external_snapshot_disks)
        elif backend_path in ['reuse_external']:
            replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
                vm, pre_set_root_dir, first_src_file, device_format)
            params.update({
                'disk_source_name': replace_disk_image,
                'disk_type': 'file',
                'disk_format': 'qcow2',
                'disk_source_protocol': 'file'
            })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        # Dispatch to the requested blockpull scenario.
        if blockpull_option in ['middle_to_top']:
            start_middletotop_blkpull_on_vm()

        if blockpull_option in ['async']:
            start_async_blkpull_on_vm()

        if blockpull_option in ['bandwidth']:
            start_bandwidth_blkpull_on_vm()

        if blockpull_option in ['timeout']:
            start_timeout_blkpull_on_vm()

        if blockpull_option in ['middle_to_top_to_base']:
            start_middle_to_top_to_base_on_vm()

        if blockpull_option in ['keep_relative']:
            start_reuse_external_blkpull_on_vm()

        if blockpull_option in ['top_as_base']:
            start_top_as_base_blkpull_on_vm()

        if blockpull_option in ['base_to_top']:
            start_base_to_top_blkpull_on_vm()
    finally:
        # Recover VM.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        utils_secret.clean_up_secrets()
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Delete reuse external disk if exists
        for disk in external_snapshot_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up backend storage
        for tmp_path in tmp_blkpull_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if nbd:
            nbd.cleanup()
        # Clean up created folders
        # (relative-path helpers create subdirs 'a' through 'd').
        for folder in [
                chr(letter) for letter in range(ord('a'),
                                                ord('a') + 4)
        ]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)
# ===== Example #5 (예제 #5) — scraped-sample separator; vote count: 0 =====
def run(test, params, env):
    """
    Test blockcopy with different options.

    1) Prepare a running guest.
    2) Create snap.
    3) Do blockcopy.
    4) Check status by 'qemu-img info'.

    :param test: avocado test object used for fail/error reporting
    :param params: cartesian config parameters of this variant
    :param env: test environment providing the VM object
    """
    def setup_blockcopy_extended_l2():
        """
        Prepare running domain with extended_l2=on type image.
        """
        # prepare image
        image_path = test_obj.tmp_dir + '/new_image'

        libvirt.create_local_disk("file",
                                  path=image_path,
                                  size='500M',
                                  disk_format=disk_format,
                                  extra=disk_extras)
        # Confirm the image really carries the extended_l2 feature.
        check_obj.check_image_info(image_path,
                                   check_item='extended l2',
                                   expected_value='true')
        test_obj.new_image_path = image_path
        # start get old parts
        session = vm.wait_for_login()
        test_obj.old_parts = utils_disk.get_parts_list(session)
        session.close()
        # attach new disk
        if encryption_disk:
            # NOTE: eval() on a cartesian config value — acceptable only
            # because the config is trusted test input.
            secret_disk_dict = eval(params.get("secret_disk_dict", '{}'))
            test_obj.prepare_secret_disk(image_path, secret_disk_dict)
            if not vm.is_alive():
                vm.start()
        else:
            virsh.attach_disk(vm.name,
                              source=image_path,
                              target=device,
                              extra=attach_disk_extra,
                              debug=True,
                              ignore_status=False)
        test_obj.new_dev = device
        # clean copy file
        if os.path.exists(tmp_copy_path):
            process.run('rm -f %s' % tmp_copy_path)

    def test_blockcopy_extended_l2():
        """
        Do blockcopy after creating snapshot with extended_l2 in disk image

        Verifies the copy target appears as the second domain disk source,
        keeps the extended_l2 feature, and that the guest can still write.
        """
        # create snap chain and check snap path extended_l2 status
        test_obj.prepare_snapshot(snap_num=1)
        check_obj.check_image_info(test_obj.snap_path_list[0],
                                   check_item='extended l2',
                                   expected_value='true')
        # Do blockcopy
        virsh.blockcopy(vm_name,
                        device,
                        tmp_copy_path,
                        options=blockcopy_options,
                        ignore_status=False,
                        debug=True)
        # Check domain exist blockcopy file and extended_l2 status
        if len(vmxml.get_disk_source(vm_name)) < 2:
            test.fail('Domain disk num is less than 2, may attach failed')
        else:
            image_file = vmxml.get_disk_source(vm_name)[1].find('source').get(
                'file')
            if image_file != tmp_copy_path:
                # BUG FIX: test.fail() takes a single message string; the
                # original passed printf-style arguments as extra
                # positional args, which raises TypeError instead of
                # reporting the failure. Format the message explicitly.
                test.fail('Blockcopy path is not in domain disk ,'
                          ' blockcopy image path is %s ,actual image path '
                          'is :%s' % (tmp_copy_path, image_file))
            check_obj.check_image_info(tmp_copy_path,
                                       check_item='extended l2',
                                       expected_value='true')
        # Check domain write file
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        added_parts = list(set(new_parts).difference(set(test_obj.old_parts)))
        utils_disk.linux_disk_check(session, added_parts[0])
        session.close()

    def teardown_blockcopy_extended_l2():
        """Remove secrets, snapshots, the attached disk and its image."""
        # Secrets only exist when the disk was prepared encrypted.
        if encryption_disk:
            libvirt_secret.clean_up_secrets()
        test_obj.backingchain_common_teardown()
        # Detach the test disk and wait for the detach event to land.
        virsh.detach_disk(
            vm_name, target=device, wait_for_event=True, debug=True)
        process.run('rm -f %s' % test_obj.new_image_path)

    def setup_blockcopy_synchronous_writes():
        """Boot the domain and drop any stale copy target file."""
        if not vm.is_alive():
            vm.start()
        # A leftover target from a previous run would break blockcopy.
        if os.path.exists(tmp_copy_path):
            process.run('rm -f %s' % tmp_copy_path)

    def test_blockcopy_synchronous_writes():
        """
        Test blockcopy with --synchronous-writes option.

        Checks the copy enters the mirroring phase, that the domain xml
        carries a <mirror> element pointing at the copy target, that the
        guest can write during mirroring, and that --pivot switches the
        disk source to the copy.
        """
        ret = virsh.blockcopy(vm_name,
                              device,
                              tmp_copy_path,
                              options=blockcopy_options,
                              ignore_status=False,
                              debug=True)
        if not ret.stdout_text.count("Now in mirroring phase"):
            test.fail("Not in mirroring phase")
        test_obj.new_image_path = tmp_copy_path
        # Check exist mirror tag after blockcopy.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        disk_list = vmxml.get_disk_all()[device]
        mirror = disk_list.find('mirror')
        # BUG FIX: an ElementTree element with no children is falsy, so
        # "if not disk_list.find('mirror')" could misreport an existing
        # (childless) element as missing; test for None explicitly.
        if mirror is None:
            test.fail('No mirror tag in current domain xml :%s' % vmxml)
        else:
            # Check file in mirror should be new copy file
            mirror_file = mirror.get('file')
            if mirror_file != tmp_copy_path:
                test.fail('Current mirror tag file:%s is not same as:%s' %
                          (mirror_file, tmp_copy_path))
            # Check file in mirror > source > file should be new copy file
            mirror_source_file = mirror.find('source').get('file')
            if mirror_source_file != tmp_copy_path:
                test.fail('Current source tag file:%s is not same as:%s' %
                          (mirror_source_file, tmp_copy_path))

        # Check domain write file
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()
        # Abort job and check disk source changed.
        virsh.blockjob(vm_name,
                       device,
                       options=' --pivot',
                       debug=True,
                       ignore_status=False)
        current_source = libvirt_disk.get_first_disk_source(vm)
        if current_source != tmp_copy_path:
            test.fail("Current source: %s is not same as original blockcopy"
                      " path:%s" % (current_source, tmp_copy_path))
        # Check domain write file after
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()

    def teardown_blockcopy_synchronous_writes():
        """Delete the blockcopy target image created by the test, if any."""
        copy_image = test_obj.new_image_path
        if os.path.exists(copy_image):
            process.run('rm -f %s' % copy_image)

    # Skip early when the installed libvirt lacks the required feature.
    libvirt_version.is_libvirt_feature_supported(params)

    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    case_name = params.get('case_name', '')
    device = params.get('target_disk')
    disk_extras = params.get('extras_options')
    blockcopy_options = params.get('blockcopy_option')
    attach_disk_extra = params.get("attach_disk_options")
    encryption_disk = params.get('enable_encrypt_disk', 'no') == "yes"
    disk_format = params.get('disk_format', 'qcow2')
    # Create object
    test_obj = blockcommand_base.BlockCommand(test, vm, params)
    check_obj = check_functions.Checkfunction(test, vm, params)
    # Get vm xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    test_obj.original_disk_source = libvirt_disk.get_first_disk_source(vm)
    # Copy target lives next to the guest's first disk image.
    tmp_copy_path = os.path.join(
        os.path.dirname(libvirt_disk.get_first_disk_source(vm)),
        "%s_blockcopy.img" % vm_name)
    # MAIN TEST CODE ###
    # NOTE: eval-based dispatch resolves the per-case setup/test/teardown
    # functions defined above from 'case_name'.
    run_test = eval("test_%s" % case_name)
    setup_test = eval("setup_%s" % case_name)
    teardown_test = eval("teardown_%s" % case_name)

    try:
        # Execute test
        setup_test()
        run_test()

    finally:
        # Always clean up and restore the original domain xml.
        teardown_test()
        bkxml.sync()
# ===== Example #6 (예제 #6) — scraped-sample separator; vote count: 0 =====
def run(test, params, env):
    """
    Test domblkthreshold for domain which has backing chain

    :param test: avocado test object used for fail/error reporting
    :param params: cartesian config parameters of this variant
    :param env: test environment providing the VM object
    """
    def setup_domblkthreshold_inactivate_layer():
        """Boot the domain and build a one-snapshot backing chain."""
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()
        # One external snapshot turns the original image into an
        # inactive (backing) layer.
        test_obj.prepare_snapshot(snap_num=1)

    def test_domblkthreshold_inactivate_layer():
        """
        Do domblkthreshold for a device which is not the active layer image
        """
        # Get backingstore index value and set domblkthreshold
        # NOTE(review): bs_index stays '' if no disk matches
        # primary_target; domblkthreshold would then get 'target[]' —
        # presumably the target always exists here, but worth confirming.
        bs_index = ''
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        for disk in vmxml.devices.by_device_tag('disk'):
            if disk.target['dev'] == primary_target:
                bs_index = disk.xmltreefile.find('backingStore').get('index')

        # Arm the threshold on the backing (inactive) layer via target[index].
        virsh.domblkthreshold(vm_name,
                              '%s[%s]' % (primary_target, bs_index),
                              domblk_threshold,
                              debug=True,
                              ignore_status=False)

        # Create some data in active layer image
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session,
                                      '/tmp/file',
                                      bs='1M',
                                      count='100')
        session.close()

        # Check blockcommit will trigger threshold event
        event = r"\'block-threshold\' for domain .*%s.*: dev: %s\[%s\].*%s.*" \
                % (vm_name, primary_target, bs_index, domblk_threshold)
        LOG.debug('Checking event pattern is :%s ', event)

        # Collect events while the commit pushes data into the armed layer.
        event_session = virsh.EventTracker.start_get_event(vm_name)
        virsh.blockcommit(vm.name,
                          primary_target,
                          commit_options,
                          ignore_status=False,
                          debug=True)
        event_output = virsh.EventTracker.finish_get_event(event_session)
        if not re.search(event, event_output):
            test.fail('Not find: %s from event output:%s' %
                      (event, event_output))

    def teardown_domblkthreshold_inactivate_layer():
        """Drop the snapshot chain and the image it created."""
        test_obj.backingchain_common_teardown()
        # Remove the snapshot overlay image left on disk.
        process.run('rm -f %s' % test_obj.new_image_path)

    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    case_name = params.get('case_name', '')
    domblk_threshold = params.get('domblk_threshold')
    commit_options = params.get('commit_options')

    test_obj = blockcommand_base.BlockCommand(test, vm, params)

    # Keep a pristine copy of the inactive domain xml for restoration.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    # The threshold is armed on the guest's first (primary) disk.
    primary_target = vm.get_first_disk_devices()["target"]
    test_obj.new_dev = primary_target
    test_obj.original_disk_source = libvirt_disk.get_first_disk_source(vm)

    # eval-based dispatch of the per-case setup/test/teardown functions.
    run_test = eval("test_%s" % case_name)
    setup_test = eval("setup_%s" % case_name)
    teardown_test = eval("teardown_%s" % case_name)

    try:
        # Execute test
        setup_test()
        run_test()

    finally:
        # Always clean up and restore the original domain xml.
        teardown_test()
        bkxml.sync()
# ===== Example #7 (예제 #7) — scraped-sample separator; vote count: 0 =====
def run(test, params, env):
    """
    Test blockresize for domain which has backing chain element.

    :param test: avocado test object used for fail/error reporting
    :param params: cartesian config parameters of this variant
    :param env: test environment providing the VM object
    """
    def setup_raw_disk_blockresize():
        """Attach a small raw disk to the running VM and snapshot it."""
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()
        # Back the new disk with a 500K raw image under the test tmp dir.
        raw_image = test_obj.tmp_dir + '/blockresize_test'
        libvirt.create_local_disk(
            "file", path=raw_image, size='500K', disk_format="raw")
        test_obj.new_image_path = raw_image
        # Hot-plug the image as an extra disk.
        virsh.attach_disk(
            vm.name, source=raw_image, target=device, extra=extra, debug=True)
        test_obj.new_dev = device
        # Build the snapshot (backing) chain on top of it.
        test_obj.prepare_snapshot()

    def test_raw_disk_blockresize():
        """Resize the top snapshot image and verify its virtual size."""
        expected_size = params.get('expected_block_size')
        top_image = test_obj.snap_path_list[-1]
        result = virsh.blockresize(vm_name, top_image, expected_size,
                                   debug=True)
        libvirt.check_exit_status(result)
        # qemu-img info must now report the requested virtual size.
        check_obj.check_image_info(top_image, 'vsize', expected_size)

    def teardown_raw_disk_blockresize():
        """Undo the snapshot chain, detach the disk and delete its image."""
        # Remove the snapshot overlays first so the disk can be detached.
        test_obj.backingchain_common_teardown()
        virsh.detach_disk(
            vm_name, target=test_obj.new_dev, wait_for_event=True, debug=True)
        # Remove the raw image file backing the detached disk.
        process.run('rm -f %s' % test_obj.new_image_path)

    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    case_name = params.get('case_name', '')
    device = params.get('new_disk')
    extra = params.get('attach_disk_extra_options')
    # Create object
    test_obj = blockcommand_base.BlockCommand(test, vm, params)
    check_obj = check_functions.Checkfunction(test, vm, params)
    # Get vm xml
    # (pristine inactive copy, restored unconditionally in finally).
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    test_obj.original_disk_source = libvirt_disk.get_first_disk_source(vm)

    # MAIN TEST CODE ###
    # eval-based dispatch of the per-case setup/test/teardown functions.
    run_test = eval("test_%s" % case_name)
    setup_test = eval("setup_%s" % case_name)
    teardown_test = eval("teardown_%s" % case_name)

    try:
        # Execute test
        setup_test()
        run_test()

    finally:
        # Always clean up and restore the original domain xml.
        teardown_test()
        bkxml.sync()