Example #1
    def _post_start_vm(self):
        """
        The steps to be executed after the vm is started
        """
        # Steps:
        # 1. Create Checkpoint for test disk
        # 2. Generate 1M random data in that test disk
        # 3. Repeat steps 1~2 as required
        # 4. Get all checkpoints' hash values

        # create checkpoints and generate some random data in test disk
        for checkpoint_index in range(self.checkpoint_rounds):
            checkpoint_name = 'ck_' + str(checkpoint_index)
            self.create_checkpoint(self.main_vm, 'vdb', checkpoint_name)
            session = self.main_vm.wait_for_login()
            test_disk_in_vm = list(utils_disk.get_linux_disks(session).keys())[0]
            session.close()
            test_disk_in_vm = '/dev/' + test_disk_in_vm
            self.generate_data_in_test_disk(self.main_vm, test_disk_in_vm,
                                            checkpoint_index * 10 + 10)
            self.checkpoint_names.append(checkpoint_name)

        # get all checkpoints' hash values
        for checkpoint in self.checkpoint_names:
            checkpoint_info = {}
            checkpoint_info['name'] = checkpoint
            checkpoint_info['hash'] = self.get_checkpoint_hash_value(self.main_vm,
                                                                     self.test_disk_path,
                                                                     checkpoint)
            self.source_vm_checkpoints.append(checkpoint_info.copy())
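
For reference, utils_disk.get_linux_disks(session) in these examples appears to return a dict keyed by kernel name (e.g. 'vdb'); the attr[1]/attr[2] indexing in later examples suggests values shaped like [kname, size, type]. Since the test disk does not change between rounds, a minimal hedged sketch (return shape assumed, not confirmed API) that resolves the device path once before the loop:

    # Sketch only: assumes get_linux_disks() returns something like
    # {'vdb': ['vdb', '100M', 'disk'], ...}
    session = self.main_vm.wait_for_login()
    try:
        data_disks = utils_disk.get_linux_disks(session)
        if not data_disks:
            raise RuntimeError("no extra data disk visible in guest")
        # The device path stays stable, so resolve it once
        test_disk_in_vm = '/dev/' + sorted(data_disks)[0]
    finally:
        session.close()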
Example #2
 def _check_disk_partitions_number():
     """ Check the data disk partitions number. """
     # Clear the shared 'partitions' list in place so the caller's
     # reference sees the refreshed partition names
     del partitions[:]
     partitions.extend(
         re.findall(r'%s\d+' % dev_id,
                    ' '.join(utils_disk.get_linux_disks(session, True))))
     return len(partitions) == bs_count
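
Such a closure is normally polled rather than called once; a hedged sketch of driving it with avocado's utils_misc.wait_for (session, dev_id and bs_count are assumed to come from the enclosing test):

    # Sketch: poll until the expected number of partitions shows up
    if not utils_misc.wait_for(_check_disk_partitions_number, 60, step=3.0):
        test.fail("Expected %s partitions on %s, got %s"
                  % (bs_count, dev_id, len(partitions)))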
Example #3
def get_disk_info_by_param(tag, params, session):
    """
    Get disk info by serial/wwn or by size.

    In most cases only one data disk is used, so we can find it by its
    size; if there is more than one, we should set the same wwn/serial
    for each data disk and its target, e.g.
      blk_extra_params_data1 = "serial=DATA_DISK1"
      blk_extra_params_mirror1 = "serial=DATA_DISK1"
      blk_extra_params_data2 = "serial=DATA_DISK2"
      blk_extra_params_mirror2 = "serial=DATA_DISK2"
    where mirror1/mirror2 are the mirror images of data1/data2, so when we
    restart the vm with mirror1 and mirror2 we can find them by serial.
    :param tag: image tag name
    :param params: Params object
    :param session: vm login session
    :return: The disk info dict(e.g. {'kname':xx, 'size':xx}) or None
    """
    info = None
    drive_path = None
    image_params = params.object_params(tag)
    if image_params.get('blk_extra_params'):
        # get disk by serial or wwn
        # utils_disk.get_linux_disks can also get serial, but for
        # virtio-scsi ID_SERIAL is a long string including serial
        # e.g. ID_SERIAL=0QEMU_QEMU_HARDDISK_DATA_DISK2 instead of
        # ID_SERIAL=DATA_DISK2
        m = re.search(r"(serial|wwn)=(\w+)", image_params["blk_extra_params"],
                      re.M)
        if m is not None:
            drive_path = utils_misc.get_linux_drive_path(session, m.group(2))

    if drive_path:
        # drive_path[5:] strips the leading '/dev/' to keep the kernel name
        info = {'kname': drive_path[5:], 'size': image_params['image_size']}
    else:
        # get disk by disk size
        conds = {
            'type': image_params.get('disk_type', 'disk'),
            'size': image_params['image_size']
        }
        disks = utils_disk.get_linux_disks(session, True)
        for kname, attr in disks.items():
            d = dict(zip(['kname', 'size', 'type'], attr))
            if all([conds[k] == d[k] for k in conds]):
                info = d
                break
    return info
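
A hedged usage sketch ('data1' is a hypothetical image tag):

    # Sketch: locate the data disk and build its device path
    disk_info = get_disk_info_by_param('data1', params, session)
    if disk_info is None:
        test.fail("Data disk 'data1' not found in guest")
    device = '/dev/%s' % disk_info['kname']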
Example #4
 def format_data_disk(self, tag):
     session = self.main_vm.wait_for_login()
     try:
         disk_params = self.params.object_params(tag)
         disk_size = disk_params["image_size"]
         disks = utils_disk.get_linux_disks(session, True)
         for kname, attr in disks.items():
             if attr[1] == disk_size and attr[2] == "disk":
                 disk_id = kname
                 break
         else:
             raise exceptions.TestFail("disk not found in guest ...")
         # configure_empty_linux_disk creates one partition, e.g. /dev/sdb1
         disk_path = "/dev/%s1" % disk_id
         mount_point = utils_disk.configure_empty_linux_disk(
             session, disk_id, disk_size)[0]
         self.disks_info[tag] = [disk_path, mount_point]
     finally:
         session.close()
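
A hedged sketch of consuming self.disks_info afterwards ('data1' is a hypothetical tag):

    # Sketch: write some data onto the mounted data disk prepared above
    disk_path, mount_point = self.disks_info["data1"]
    session = self.main_vm.wait_for_login()
    try:
        session.cmd("dd if=/dev/urandom of=%s/testfile bs=1M count=10"
                    % mount_point)
    finally:
        session.close()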
Example #5
    def dd_test():
        """
        dd test to increase the number of interrupts
        """
        vm_disks = utils_disk.get_linux_disks(session)
        extra_disk = list(vm_disks.keys())[0] if vm_disks else None
        if not extra_disk:
            test.error("No additional disks found")

        error_context.context("Execute dd write test", logging.info)
        session.cmd(params["dd_write"] % extra_disk)
        irq_info_after_dd_write = get_irq_info()
        analyze_interrupts(irq_info_before_test, irq_info_after_dd_write)

        error_context.context("Execute dd read test", logging.info)
        session.cmd(params["dd_read"] % extra_disk)
        irq_info_after_dd_read = get_irq_info()
        analyze_interrupts(irq_info_after_dd_write, irq_info_after_dd_read)
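
For context, params["dd_write"] and params["dd_read"] above are assumed to be format strings taking the guest disk name; hypothetical values might look like:

    # Sketch: hypothetical values for the params consumed by dd_test()
    params["dd_write"] = "dd if=/dev/zero of=/dev/%s bs=1M count=100 oflag=direct"
    params["dd_read"] = "dd if=/dev/%s of=/dev/null bs=1M count=100 iflag=direct"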
Example #6
    def get_disk_size(did):
        """
        Get the disk size from guest.

        :param did: the disk id, e.g. sdb, sda for linux; 1, 2 for windows
        :return: the disk size
        """
        if params['os_type'] == 'linux':
            size = utils_disk.get_linux_disks(session)[did][1].strip()
        else:
            script = '{}_{}'.format("disk",
                                    utils_misc.generate_random_string(6))
            cmd = "echo %s > {0} && diskpart /s {0} && del /f {0}".format(
                script)
            p = r'Disk\s+%s\s+[A-Z]+\s+\d+\s+[A-Z]+\s+(?P<free>\d+\s+[A-Z]+)'
            disk_info = session.cmd(cmd % 'list disk')
            size = re.search(p % did, disk_info,
                             re.I | re.M).groupdict()['free'].strip()
        logging.info('The size of disk[%s] is %s' % (did, size))
        return size
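
A hedged sketch of how such a helper is typically used around a resize (the host-side resize step is only indicated; 'sdb' is illustrative):

    # Sketch: verify the guest sees a new size after a host-side resize
    old_size = get_disk_size('sdb')
    # ... resize the backing image on the host here ...
    if not utils_misc.wait_for(lambda: get_disk_size('sdb') != old_size, 60):
        test.fail("Guest still reports the old size for sdb")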
Example #7
    def get_disk_size(did):
        """
        Get the disk size from guest.

        :param did: the disk id, e.g. sdb, sda for linux; 1, 2 for windows
        :return: the disk size
        """
        session = vm.wait_for_login(timeout=timeout)
        if windows:
            script = '{}_{}'.format("disk",
                                    utils_misc.generate_random_string(6))
            cmd = "echo %s > {0} && diskpart /s {0} && del /f {0}".format(
                script)
            p = r'Disk\s+%s\s+[A-Z]+\s+(?P<size>\d+\s+[A-Z]+)\s+'
            disk_info = session.cmd(cmd % 'list disk')
            size = re.search(p % did, disk_info,
                             re.I | re.M).groupdict()['size'].strip()
        else:
            size = utils_disk.get_linux_disks(session)[did][1].strip()
        logging.info('The size of disk %s is %s' % (did, size))
        session.close()
        return size
Example #8
 def get_disk_op_cmd(disk_op_cmd, disk_size):
     """
     Find the disk driver letter and format the disk, and get
     disk driver letter,return disk_op_command
     """
     if os_type == "windows":
         disk_op_cmd = utils_misc.set_winutils_letter(session, disk_op_cmd)
         logging.info("Get windows disk index that to be formatted")
         disk_id = utils_disk.get_windows_disks_index(session, disk_size)
         if not utils_disk.update_windows_disk_attributes(session, disk_id):
             test.error("Failed to enable data disk %s" % disk_id)
         d_letter = utils_disk.configure_empty_windows_disk(session,
                                                            disk_id[0],
                                                            disk_size)[0]
         output_path = d_letter + ":\\test.dat"
     else:
         disks = utils_disk.get_linux_disks(session)
         for key, value in disks.items():
             if value[1] == disk_size and value[2] == "disk":
                 output_path = key
     if not output_path:
         test.fail("Can not get output file path in guest.")
     disk_op_cmd %= output_path
     return disk_op_cmd
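
A hedged usage sketch (the dd command and '40G' size are illustrative):

    # Sketch: fill in the output path and run the I/O command in the guest
    dd_cmd = get_disk_op_cmd("dd if=/dev/zero of=%s bs=1M count=100", "40G")
    session.cmd(dd_cmd, timeout=360)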
Example #9
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_data_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "ceph":
            ceph_mon_host = params.get("ceph_mon_host",
                                       "EXAMPLE_MON_HOST_AUTHX")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT")
            ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL")
            ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE")
            ceph_disk_name = ceph_pool_name + "/" + ceph_file_name
            ceph_client_name = params.get("ceph_client_name",
                                          "EXAMPLE_CLIENT_NAME")
            ceph_client_key = params.get("ceph_client_key",
                                         "EXAMPLE_CLIENT_KEY")
            ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER")
            ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Keep ceph_cfg empty so cleanup can tell whether a config
            # file was actually created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_host)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_host, key_file)
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_host,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            if enable_auth:
                disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {"attrs": {}}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["attrs"]["file"] = target_file_path
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["attrs"]["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    logging.debug("target params: %s", target_params)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Assemble options for virsh backup-begin
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb: write 1M at a different offset each
            # round so every backup has fresh data to capture
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove local backup file
        if "target_file_path" in locals():
            if os.path.exists(target_file_path):
                os.remove(target_file_path)

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
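
For reference, a hedged sketch of the kind of check utils_backup.cmp_backup_data presumably performs, built on the real qemu-img compare subcommand (exit status 0 means both images hold identical guest-visible data):

    # Sketch: compare the saved original data with one backup image
    cmd = "qemu-img compare -f qcow2 -F %s %s %s" % (
        target_driver, original_data_file, backup_path)
    result = process.run(cmd, shell=True, ignore_status=True)
    identical = (result.exit_status == 0)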
Example #10
 def _check_disk_partitions_number():
     """ Check the data disk partitions number. """
     disks = utils_disk.get_linux_disks(session, True)
     # device_name[5:] strips the leading '/dev/' to get the kernel name
     return len(re.findall(r'%s\d+' % device_name[5:], ' '.join(disks))) == 1
Example #11
 def get_linux_disk_path(session, disk_size):
     disks = utils_disk.get_linux_disks(session, True)
     for kname, attr in disks.items():
         if attr[1] == disk_size and attr[2] == "disk":
             return kname
     return None
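
A hedged usage sketch ('2G' is an illustrative size):

    # Sketch: find the raw 2G data disk and build its device path
    kname = get_linux_disk_path(session, "2G")
    if kname is None:
        test.error("No 2G data disk found in guest")
    device = "/dev/%s" % kname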
Example #12
def run(test, params, env):
    """
    Run IOzone for linux on a linux guest:
    1) Log into a guest.
    2) Execute the IOzone test.
    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _get_data_disks():
        """ Get the data disks by serial or wwn options. """
        disks = {}
        for data_image in params["images"].split()[1:]:
            extra_params = params.get("blk_extra_params_%s" % data_image, '')
            match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M)
            if match:
                drive_id = match.group(2)
            else:
                continue
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if not drive_path:
                test.error("Failed to get '%s' drive path" % data_image)
            disks[drive_path[5:]] = data_image
        return disks

    def _get_mounted_points():
        """ Get the mounted points. """
        points = []
        for dev in re.finditer(r'(%s\d+)' % did, ' '.join(disks)):
            s = re.search(r'/dev/%s\s+(\S+)\s+' % dev.group(1), mount_info,
                          re.M)
            if s:
                points.append(s.group(1))
        return points

    def _wait_for_procs_done(timeout=1800):
        """ Wait all the processes are done. """
        if not utils_misc.wait_for(
                lambda: 'iozone' not in session.cmd_output('pgrep -xl iozone'),
                timeout,
                step=3.0):
            test.error('Not all iozone processes done in %s sec.' % timeout)

    iozone_test_dir = params.get('iozone_test_dir', '/home')
    iozone_cmd_options = params['iozone_cmd_options']
    iozone_timeout = float(params.get("iozone_timeout", 1800))
    n_partitions = params.get('partitions_num', 1)
    fstype = params.get('fstype', 'xfs')
    labeltype = params.get('labeltype', utils_disk.PARTITION_TABLE_TYPE_GPT)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(
        timeout=float(params.get("login_timeout", 360)))
    _wait_for_procs_done()
    error_context.context("Running IOzone command on guest.")
    iozone = generate_instance(params, vm, 'iozone')
    try:
        dids = _get_data_disks()
        if dids:
            mount_info = session.cmd_output_safe(
                'cat /proc/mounts | grep \'/dev/\'')
            disks = utils_disk.get_linux_disks(session, True)
            for did, image_name in dids.items():
                size = params.get('image_size_%s' % image_name)
                start = params.get('image_start_%s' % image_name, "0M")
                mounted_points = _get_mounted_points()
                if not mounted_points:
                    mounted_points = utils_disk.configure_empty_linux_disk(
                        session, did, size, start, n_partitions, fstype,
                        labeltype)
                for mounted_point in mounted_points:
                    iozone.run(iozone_cmd_options % mounted_point,
                               iozone_timeout)
                utils_disk.clean_partition_linux(session, did)
        else:
            iozone.run(iozone_cmd_options % iozone_test_dir, iozone_timeout)
    finally:
        if params.get('sub_test_shutdown_vm', 'no') == 'no':
            iozone.clean()
        session.close()
Example #13
def run(test, params, env):

    """
    Special hardware test case.
    FC host: ibm-x3650m4-05.lab.eng.pek2.redhat.com
    Disk serial name: scsi-360050763008084e6e0000000000001a4
    # multipath -ll
    mpathb (360050763008084e6e0000000000001a8) dm-4 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 2:0:1:0 sde 8:64 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 2:0:0:0 sdd 8:48 active ready running
    mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 1:0:1:0 sdc 8:32 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 1:0:0:0 sdb 8:16 active ready running
    Customer Bug ID: 1720047  1753992

    format passthrough disk and do iozone test on it.

    1) pass-through /dev/sdb
    2) format it and do iozone test
    3) pass-through /dev/sg1
    4) format it and do iozone test
    5) pass-through /dev/mapper/mpatha
    6) format it and do iozone test

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def get_multipath_disks(mpath_name="mpatha"):

        """
        Get all disks of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a list of disk knames; fails the test if none are found
        """
        disks = []
        path_states = ["active ready running",
                       "active faulty offline",
                       "failed faulty offline"]
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            if any(state in line for state in path_states):
                disks.append(line.split()[-5])
        if not disks:
            test.fail("Failed to get disks by 'multipath -ll'")
        else:
            return disks

    def get_multipath_disks_status(mpath_name="mpatha"):

        """
        Get status of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a list of path states; fails the test if they cannot be read
        """
        disks = get_multipath_disks(mpath_name)
        disks_status = []
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            for i in range(len(disks)):
                if disks[i] in line:
                    disks_status.append(line.strip().split()[-1])
                    break
        if not disks_status or len(disks_status) != len(disks):
            test.fail("Failed to get disks status by 'multipath -ll'")
        else:
            return disks_status

    def compare_multipath_status(status, mpath_name="mpatha"):

        """
        Compare status whether equal to the given status.
        This function just focus on all paths are running or all are offline.

        :param status: the state of disks.
        :param mpath_name: multi-path name.
        :return: True, if equal to the given status or False
        """
        status_list = get_multipath_disks_status(mpath_name)
        if len(set(status_list)) == 1 and status_list[0] == status:
            return True
        else:
            return False

    def set_disk_status_to_online_offline(disk, status):

        """
        set disk state to online/offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disk: disk name.
        :param status: the state of disk.
        :return: None
        """
        error_context.context("Set disk '%s' to status '%s'."
                              % (disk, status), logging.info)
        process.run("echo %s >  /sys/block/%s/device/state"
                    % (status, disk), shell=True)

    def set_multipath_disks_status(disks, status):

        """
        set multiple paths to same status. all disks online or offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disks: disk list.
        :param status: the state of disk. online/offline
        :return: None
        """
        for disk in disks:
            set_disk_status_to_online_offline(disk, status)
        wait.wait_for(lambda: compare_multipath_status(status),
                      first=2, step=1.5, timeout=60)

    def _do_post_cmd(session):
        """
        Run the configured post command (if any) and close the session.

        :param session: A shell session object.
        :return: None
        """
        cmd = params.get("post_cmd")
        if cmd:
            session.cmd_status_output(cmd)
        session.close()

    def get_disk_by_sg_map(image_name_stg):
        """
        get disk name by sg_map.

        :param image_name_stg: linux sg. e.g. /dev/sg1
        :return: disk name. e.g. sdb
        """
        outputs = process.run("sg_map", shell=True).stdout.decode().splitlines()
        for output in outputs:
            if image_name_stg in output:
                return output.strip().split()[-1].split("/")[-1]

    def _get_windows_disks_index(session, image_size):
        """
        Get all disks index which show in 'diskpart list disk'.
        except for system disk.
        in diskpart: if disk size < 8GB: it displays as MB
                     else: it displays as GB

        :param session: session object to guest.
        :param image_size: image size. e.g. 40M
        :return: a list with all disks index except for system disk.
        """
        disk = "disk_" + ''.join(random.sample(string.ascii_letters + string.digits, 4))
        disk_indexs = []
        list_disk_cmd = "echo list disk > " + disk
        list_disk_cmd += " && echo exit >> " + disk
        list_disk_cmd += " && diskpart /s " + disk
        list_disk_cmd += " && del /f " + disk
        disks = session.cmd_output(list_disk_cmd)
        size_type = image_size[-1] + "B"
        if size_type == "MB":
            disk_size = image_size[:-1] + " MB"
        elif size_type == "GB" and int(image_size[:-1]) < 8:
            disk_size = str(int(image_size[:-1]) * 1024) + " MB"
        else:
            disk_size = image_size[:-1] + " GB"
        regex_str = r'Disk (\d+).*?%s' % disk_size
        for disk in disks.splitlines():
            if disk.startswith("  Disk"):
                o = re.findall(regex_str, disk, re.I | re.M)
                if o:
                    disk_indexs.append(o[0])
        return disk_indexs

    def delete_partition_on_host(did):
        """
        Delete partitions on the given disk.

        :param did: disk kname, e.g. 'sdb', 'nvme0n1'
        :return: None
        """
        # process.run("partprobe /dev/%s" % did, shell=True)
        list_disk_cmd = "lsblk -o KNAME,MOUNTPOINT"
        output = process.run(list_disk_cmd, shell=True).stdout.decode()
        regex_str = did + r"\w*(\d+)"
        rm_cmd = 'parted -s "/dev/%s" rm %s'
        for line in output.splitlines():
            partition = re.findall(regex_str, line, re.I | re.M)
            if partition:
                if "/" in line.split()[-1]:
                    process.run("umount %s" % line.split()[-1], shell=True)
        list_partition_number = "parted -s /dev/%s print|awk '/^ / {print $1}'"
        partition_numbers = process.run(list_partition_number % did,
                                        shell=True).stdout.decode()
        ignore_err_msg = "unrecognised disk label"
        if ignore_err_msg in partition_numbers:
            logging.info("no partition to delete on %s" % did)
        else:
            partition_numbers = partition_numbers.splitlines()
            for number in partition_numbers:
                logging.info("remove partition %s on %s" % (number, did))
                process.run(rm_cmd % (did, number), shell=True)
            process.run("partprobe /dev/%s" % did, shell=True)

    def delete_multipath_partition_on_host(mpath_name="mpatha"):
        """
        Delete partitions on the given multipath.

        :param mpath_name: multi-path name.
        :return: None
        """
        # process.run("partprobe /dev/mapper/%s" % mpath_name, shell=True)
        output = process.run("lsblk", shell=True).stdout.decode()
        mpath_dict = {}
        pattern = r'%s(\d+)' % mpath_name
        rm_cmd = 'parted -s "/dev/mapper/%s" rm %s'
        for line in output.splitlines():
            for part_num in re.findall(pattern, line, re.I | re.M):
                if part_num not in mpath_dict.keys():
                    mpath_dict[part_num] = line.split()[-1]
        for key, value in mpath_dict.items():
            if "/" in value:
                process.run("umount %s" % value, shell=True)
            logging.info("remove partition %s on %s" % (key, mpath_name))
            process.run(rm_cmd % (mpath_name, key), shell=True)
        output = process.run("dmsetup ls", shell=True).stdout.decode()
        for line in output.splitlines():
            for key in mpath_dict.keys():
                if (mpath_name + key) in line:
                    process.run("dmsetup remove %s%s" % (mpath_name, key),
                                shell=True)
        process.run("partprobe /dev/mapper/%s" % mpath_name, shell=True)

    def clean_partition_on_host(mpath_name="mpatha"):
        """
        Delete partitions on multi-path disks.

        :param mpath_name: multi-path name.
        :return: None
        """
        delete_multipath_partition_on_host(mpath_name)
        disks = get_multipath_disks(mpath_name)
        for disk in disks:
            delete_partition_on_host(disk)

    error_context.context("Get FC host name:", logging.info)
    hostname = process.run("hostname", shell=True).stdout.decode().strip()
    if hostname != params["special_host"]:
        test.cancel("The special host is not '%s', cancel the test."
                    % params["special_host"])
    error_context.context("Get FC disk serial name:", logging.info)
    stg_serial_name = params["stg_serial_name"]
    image_name_stg = params["image_name_stg"].split("/")[-1]
    if "mpath" not in image_name_stg:
        if "sg" in image_name_stg:
            image_name_stg = get_disk_by_sg_map(image_name_stg)
            if not image_name_stg:
                test.cancel("Failed to get disk by sg_map,"
                            " output is '%s'" % image_name_stg)
        query_cmd = "udevadm info -q property -p /sys/block/%s" % image_name_stg
        outputs = process.run(query_cmd, shell=True).stdout.decode().splitlines()
        for output in outputs:
            # ID_SERIAL=360050763008084e6e0000000000001a4
            if stg_serial_name in output and output.startswith("ID_SERIAL="):
                break
        else:
            test.cancel("The special disk is not '%s', cancel the test."
                        % stg_serial_name)
    else:
        outputs = process.run("multipath -ll", shell=True).stdout.decode().splitlines()
        for output in outputs:
            if stg_serial_name in output and image_name_stg in output:
                break
        else:
            test.cancel("The special disk is not '%s', cancel the test."
                        % stg_serial_name)
        mpath_name = image_name_stg
        multi_disks = get_multipath_disks(mpath_name)
        error_context.context("Get all disks for '%s': %s"
                              % (mpath_name, multi_disks), logging.info)
        error_context.context("Verify all paths are running for %s before "
                              "start vm." % mpath_name, logging.info)
        if compare_multipath_status("running", mpath_name):
            logging.info("All paths are running for %s." % mpath_name)
        else:
            logging.info("Not all paths are running for %s, set "
                         "them to running." % mpath_name)
            set_multipath_disks_status(multi_disks, "running")
    error_context.context("Delete partitions on host before testing.",
                          logging.info)
    clean_partition_on_host(params["mpath_name"])
    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception as e:
        test.error("failed to create VM: %s" % six.text_type(e))
    session = vm.wait_for_login(timeout=int(params.get("timeout", 240)))
    file_system = [_.strip() for _ in params["file_system"].split()]
    labeltype = params.get("labeltype", "gpt")
    iozone_target_num = int(params.get('iozone_target_num', '5'))
    iozone_options = params.get('iozone_options')
    iozone_timeout = float(params.get('iozone_timeout', '7200'))
    image_num_stg = int(params["image_num_stg"])
    image_size_stg = params["image_size_stg"]
    ostype = params["os_type"]
    try:
        if ostype == "windows":
            error_context.context("Get windows disk index that to "
                                  "be formatted", logging.info)
            # disks = utils_disk.get_windows_disks_index(session, image_size_stg)
            disks = _get_windows_disks_index(session, image_size_stg)
            if len(disks) != image_num_stg:
                test.fail("Failed to list all disks by image size. The expected "
                          "is %s, actual is %s" % (image_num_stg, len(disks)))
            error_context.context("Clear readonly for all disks and online "
                                  "them in windows guest.", logging.info)
            if not utils_disk.update_windows_disk_attributes(session, disks):
                test.fail("Failed to update windows disk attributes.")
        else:
            error_context.context("Get linux disk that to be "
                                  "formatted", logging.info)
            disks = sorted(utils_disk.get_linux_disks(session).keys())
            if len(disks) != image_num_stg:
                test.fail("Failed to list all disks by image size. The expected "
                          "is %s, actual is %s" % (image_num_stg, len(disks)))
            # logging.info("Get data disk by serial name: '%s'" % stg_serial_name)
            # drive_path = utils_misc.get_linux_drive_path(session, stg_serial_name)
            # if not drive_path:
            #     test.fail("Failed to get data disk by serial name: %s"
            #               % stg_serial_name)
    except Exception:
        _do_post_cmd(session)
        raise
    if iozone_options:
        iozone = generate_instance(params, vm, 'iozone')
    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")
        for n, disk in enumerate(disks):
            error_context.context("Format disk in guest: '%s'" % disk,
                                  logging.info)
            # Randomly select one file system from file_system
            fstype = random.choice(file_system).strip()
            partitions = utils_disk.configure_empty_disk(
                session, disk, image_size_stg, ostype,
                fstype=fstype, labeltype=labeltype)
            if not partitions:
                test.fail("Fail to format disks.")
            for partition in partitions:
                if iozone_options and n < iozone_target_num:
                    iozone.run(iozone_options.format(partition), iozone_timeout)
        error_context.context("Reboot guest after format disk "
                              "and iozone test.", logging.info)
        session = vm.reboot(session=session, timeout=int(params.get("timeout", 240)))
        error_context.context("Shutting down guest after reboot it.", logging.info)
        vm.graceful_shutdown(timeout=int(params.get("timeout", 240)))
        if vm.is_alive():
            test.fail("Fail to shut down guest.")
        error_context.context("Start the guest again.", logging.info)
        vm = env.get_vm(params["main_vm"])
        vm.create(params=params)
        session = vm.wait_for_login(timeout=int(params.get("timeout", 240)))
        error_context.context("Delete partitions in guest.", logging.info)
        for disk in disks:
            utils_disk.clean_partition(session, disk, ostype)
        error_context.context("Inform the OS of partition table changes "
                              "after testing.", logging.info)
        if "sg1" in params["image_name_stg"]:
            image_name_stg = get_disk_by_sg_map(params["image_name_stg"])
            process.run("partprobe /dev/%s" % image_name_stg, shell=True)
        else:
            process.run("partprobe %s" % params["image_name_stg"], shell=True)
    finally:
        if iozone_options:
            iozone.clean()
        _do_post_cmd(session)
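
The helpers above also support a path-failover scenario that this flow does not exercise; a hedged sketch using the same helpers (assuming an 'mpatha' map):

    # Sketch: take all paths offline, verify, then bring them back
    disks = get_multipath_disks("mpatha")
    set_multipath_disks_status(disks, "offline")
    if not compare_multipath_status("offline", "mpatha"):
        test.fail("Not all paths went offline for mpatha")
    set_multipath_disks_status(disks, "running")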
Example #14
def run(test, params, env):
    """
    Run IOzone for linux on a linux guest:
    1) Log into a guest.
    2) Execute the IOzone test.
    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _get_data_disks():
        """ Get the data disks by serial or wwn options. """
        disks = {}
        for data_image in params["images"].split()[1:]:
            extra_params = params.get("blk_extra_params_%s" % data_image, '')
            match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M)
            if match:
                drive_id = match.group(2)
            else:
                continue
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if not drive_path:
                test.error("Failed to get '%s' drive path" % data_image)
            disks[drive_path[5:]] = data_image
        return disks

    def _get_mounted_points():
        """ Get the mounted points. """
        points = []
        for dev in re.finditer(r'(%s\d+)' % did, ' '.join(disks)):
            s = re.search(r'/dev/%s\s+(\S+)\s+' % dev.group(1), mount_info, re.M)
            if s:
                points.append(s.group(1))
        return points

    def _wait_for_procs_done(timeout=1800):
        """ Wait all the processes are done. """
        if not utils_misc.wait_for(
                lambda: 'iozone' not in session.cmd_output('pgrep -xl iozone'),
                timeout, step=3.0):
            test.error('Not all iozone processes done in %s sec.' % timeout)

    iozone_test_dir = params.get('iozone_test_dir', '/home')
    iozone_cmd_options = params['iozone_cmd_options']
    iozone_timeout = float(params.get("iozone_timeout", 1800))
    n_partitions = params.get('partitions_num', 1)
    fstype = params.get('fstype', 'xfs')
    labeltype = params.get('labeltype', utils_disk.PARTITION_TABLE_TYPE_GPT)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=float(params.get("login_timeout", 360)))
    _wait_for_procs_done()
    error_context.context("Running IOzone command on guest.")
    # Create the iozone instance before the try block so the finally
    # clause's iozone.clean() cannot hit an unbound name
    iozone = generate_instance(params, session, 'iozone')
    try:
        dids = _get_data_disks()
        if dids:
            mount_info = session.cmd_output_safe('cat /proc/mounts | grep \'/dev/\'')
            disks = utils_disk.get_linux_disks(session, True)
            for did, image_name in dids.items():
                size = params.get('image_size_%s' % image_name)
                start = params.get('image_start_%s' % image_name, "0M")
                mounted_points = _get_mounted_points()
                if not mounted_points:
                    mounted_points = utils_disk.configure_empty_linux_disk(
                        session, did, size, start, n_partitions, fstype, labeltype)
                for mounted_point in mounted_points:
                    iozone.run(iozone_cmd_options % mounted_point, iozone_timeout)
                utils_disk.clean_partition_linux(session, did)
        else:
            iozone.run(iozone_cmd_options % iozone_test_dir, iozone_timeout)
    finally:
        iozone.clean()
        session.close()
Example #15
def run(test, params, env):
    """
    Integration test of backup and backing_chain.

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5 as required
    7. before the last round of the backup job, do a blockcommit/pull/copy
    8. check the full/incremental backup file data
    """
    def run_blk_cmd():
        """
        Run blockcommit/blockpull/blockcopy command.
        """
        def run_blockpull():
            """
            Run blockpull command.
            """
            if from_to == "mid_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, middle_layer1_index)
            elif from_to == "base_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, base_layer_index)
            virsh.blockpull(vm_name,
                            original_disk_target,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        def run_blockcommit():
            """
            Run blockcommit command.
            """
            if from_to == "top_to_base":
                # Do blockcommit from top layer to base layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    base_layer_index))

            elif from_to == "mid_to_mid":
                # Do blockcommit from middle layer to another middle layer
                if len(indice) < 4:
                    test.fail(
                        "At least 4 layers required for the test 'mid_to_mid'")
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              middle_layer2_index))
            elif from_to == "top_to_mid":
                # Do blockcommit from top layer to middle layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    middle_layer1_index))
            elif from_to == "mid_to_base":
                # Do blockcommit from middle layer to base layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              base_layer_index))
            virsh.blockcommit(vm_name,
                              original_disk_target,
                              cmd_option,
                              debug=True,
                              ignore_status=False)

        def run_blockcopy():
            """
            Run blockcopy command.
            """
            copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2")
            cmd_option = "--wait --verbose --transient-job --pivot"
            if blockcopy_method == "shallow_copy":
                cmd_option += " --shallow"
            if blockcopy_reuse == "reuse_external":
                cmd_option += " --reuse-external"
                if blockcopy_method == "shallow_copy":
                    create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s"
                    create_img_cmd %= (backend_img, copy_dest)
                else:
                    create_img_cmd = "qemu-img create -f qcow2 %s %s"
                    create_img_cmd %= (copy_dest, original_disk_size)
                process.run(create_img_cmd, shell=True, ignore_status=False)
            virsh.blockcopy(vm_name,
                            original_disk_target,
                            copy_dest,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        # Get disk backing store indice info in vm disk xml
        cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk")
        cur_test_disk_xml = ''
        for disk_xml in cur_disk_xmls:
            if disk_xml.target['dev'] == original_disk_target:
                cur_test_disk_xml = disk_xml
                logging.debug("Current disk xml for %s is:\n %s",
                              original_disk_target, cur_test_disk_xml)
                break
        indice = re.findall(r".*index=['|\"](\d+)['|\"].*",
                            str(cur_test_disk_xml))
        logging.debug("backing store indice for %s is: %s",
                      original_disk_target, indice)
        if len(indice) < 3:
            test.fail("At least 3 layers required for the test.")
        top_layer_index = indice[0]
        middle_layer1_index = indice[1]
        middle_layer2_index = indice[-2]
        base_layer_index = indice[-1]
        logging.debug(
            "Following backing store will be used: %s",
            "top:%s; middle_1: %s, middle_2:%s, base: %s" %
            (top_layer_index, middle_layer1_index, middle_layer2_index,
             base_layer_index))
        # Start the block command
        if blockcommand == "blockpull":
            run_blockpull()
        if blockcommand == "blockcommit":
            run_blockcommit()
        if blockcommand == "blockcopy":
            run_blockcopy()

    def create_shutoff_snapshot(original_img, snapshot_img):
        """
        Create a shutoff snapshot, i.e. a disk snapshot created directly
        with a qemu-img command rather than managed by libvirt.

        :param original_img: The image we will take shutoff snapshot for.
        :param snapshot_img: The newly created shutoff snapshot image.
        """
        cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img)
        img_info = process.run(cmd, shell=True,
                               ignore_status=False).stdout_text
        json_data = json.loads(img_info)
        cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format(
            original_img, snapshot_img)
        process.run(cmd, shell=True, ignore_status=False)
        try:
            bitmaps = json_data['format-specific']['data']['bitmaps']
            for bitmap in bitmaps:
                bitmap_flags = bitmap['flags']
                bitmap_name = bitmap['name']
                if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags:
                    cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format(
                        snapshot_img, bitmap_name)
                    process.run(cmd, shell=True, ignore_status=False)
        except Exception as bitmap_error:
            logging.debug("Cannot add bitmap to new image, skip it: %s",
                          bitmap_error)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    # vm's original disk config
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")

    # pull mode backup config
    scratch_type = params.get("scratch_type", "file")
    nbd_protocol = params.get("nbd_protocol", "tcp")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")

    # test config
    backup_rounds = int(params.get("backup_rounds", 4))
    shutoff_snapshot = "yes" == params.get("shutoff_snapshot")
    blockcommand = params.get("blockcommand")
    from_to = params.get("from_to")
    blockcopy_method = params.get("blockcopy_method")
    blockcopy_reuse = params.get("blockcopy_reuse")
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        disks_not_tested = list(vmxml.get_disk_all().keys())
        logging.debug("Not tested disks are: %s", disks_not_tested)
        utils_backup.enable_inc_backup_for_vm(vm)

        # Destroy vm before test
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "%s_image.qcow2" % original_disk_target
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        else:
            test.cancel("The disk type '%s' is not supported in this script."
                        % original_disk_type)
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        snapshot_list = []
        cur_disk_xml = disk_xml
        cur_disk_path = disk_path
        cur_disk_params = disk_params
        backend_img = ""
        for backup_index in range(backup_rounds):
            # Do external snapshot
            if shutoff_snapshot:
                virsh.detach_disk(vm.name,
                                  original_disk_target,
                                  extra="--persistent",
                                  ignore_status=False,
                                  debug=True)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index)
                shutoff_snapshot_path = os.path.join(tmp_dir,
                                                     shutoff_snapshot_name)

                create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path)
                cur_disk_params["source_file"] = shutoff_snapshot_path
                cur_disk_xml = libvirt.create_disk_xml(cur_disk_params)
                virsh.attach_device(vm.name,
                                    cur_disk_xml,
                                    flagstr="--config",
                                    ignore_status=False,
                                    debug=True)
                vm.start()
                vm.wait_for_login().close()
                cur_disk_path = shutoff_snapshot_path
            else:
                snapshot_name = "snap_%s" % str(backup_index)
                snapshot_option = ""
                snapshot_file_name = os.path.join(tmp_dir, snapshot_name)
                for disk_name in disks_not_tested:
                    snapshot_option += "--diskspec %s,snapshot=no " % disk_name
                snapshot_option += "--diskspec %s,file=%s" % (
                    original_disk_target, snapshot_file_name)
                virsh.snapshot_create_as(vm_name,
                                         "%s --disk-only %s" %
                                         (snapshot_name, snapshot_option),
                                         debug=True)
                snapshot_list.append(snapshot_name)

            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {"name": "localhost", "port": nbd_tcp_port}
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_file_name = "scratch_file_%s" % backup_index
                    scratch_file_path = os.path.join(tmp_dir,
                                                     scratch_file_name)
                    scratch_params["attrs"]["file"] = scratch_file_path
                    logging.debug("scratch_params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Prepare options for the backup job
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_hostname": "localhost",
                "nbd_export": nbd_export_name,
                "nbd_tcp_port": nbd_tcp_port
            }
            if not is_incremental:
                # Do full backup
                utils_backup.pull_full_backup_to_file(nbd_params,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
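            # In pull mode the backup job keeps the NBD export alive until
            # the job is explicitly aborted, so end it once the data has
            # been pulled.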
            virsh.domjobabort(vm_name, debug=True)
            # Run the blockcommit/blockpull/blockcopy command before the
            # last round of the backup job, to verify that the block
            # command preserves the dirty bitmap data.
            if backup_index == backup_rounds - 2:
                run_blk_cmd()
                cur_disk_path = vm.get_blk_devices(
                )[original_disk_target]['source']

            if backup_index == backup_rounds - 3:
                backend_img = vm.get_blk_devices(
                )[original_disk_target]['source']

        # Get current active image for the test disk
        vm_disks = vm.get_blk_devices()
        current_active_image = vm_disks[original_disk_target]['source']
        logging.debug("The current active image for '%s' is '%s'",
                      original_disk_target, current_active_image)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name,
                                    checkpoint_name,
                                    debug=True,
                                    ignore_status=False)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data

        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (
            current_active_image, original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" %
                          (current_active_image, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints' metadata again to make sure vm has no checkpoints
        if "checkpoint_list" in locals():
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name,
                                        checkpoint_name,
                                        options="--metadata")
        # Remove snapshots
        if "snapshot_list" in locals():
            for snapshot_name in snapshot_list:
                virsh.snapshot_delete(vm_name,
                                      "%s --metadata" % snapshot_name,
                                      debug=True)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        for file_name in os.listdir(tmp_dir):
            file_path = os.path.join(tmp_dir, file_name)
            if 'env' not in file_path:
                if os.path.isfile(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
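
# A minimal standalone sketch (illustrative only, not part of the test
# above) of the backing-chain index extraction used before running the
# block commands: virsh dumpxml nests <backingStore> elements from the
# active layer down to the base image, so re.findall returns the indices
# in top-to-base order. The XML below is a hand-written assumption of
# that layout.
import re

sample_disk_xml = """
<disk type='file' device='disk'>
  <source file='/tmp/snap_2' index='4'/>
  <backingStore type='file' index='3'>
    <source file='/tmp/snap_1'/>
    <backingStore type='file' index='2'>
      <source file='/tmp/snap_0'/>
      <backingStore type='file' index='1'>
        <source file='/tmp/vdb_image.qcow2'/>
      </backingStore>
    </backingStore>
  </backingStore>
</disk>
"""

indices = re.findall(r"index=['\"](\d+)['\"]", sample_disk_xml)
print(indices)                    # ['4', '3', '2', '1']
print(indices[0], indices[-1])    # top layer: 4, base layer: 1
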
Example #16
def run(test, params, env):
    """
    Test multi-disk support of the guest. This case will:
    1) Create disk images defined in the configuration file.
    2) Start the guest with those disks.
    3) Check qtree vs. test params. (Optional)
    4) Create partitions on those disks.
    5) Get disk dev filenames in guest.
    6) Format those disks in guest.
    7) Copy file into / out of those disks.
    8) Compare the original file and the copied file using md5 or fc command.
    9) Repeat steps 3-5 if needed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _add_param(name, value):
        """ Converts name+value to stg_params string """
        if value:
            value = re.sub(' ', '\\ ', value)
            return " %s:%s " % (name, value)
        else:
            return ''

    def _do_post_cmd(session):
        cmd = params.get("post_cmd")
        if cmd:
            session.cmd_status_output(cmd)
        session.close()

    error_context.context("Parsing test configuration", logging.info)
    stg_image_num = 0
    stg_params = params.get("stg_params", "")
    # Compatibility
    stg_params += _add_param("image_size", params.get("stg_image_size"))
    stg_params += _add_param("image_format", params.get("stg_image_format"))
    stg_params += _add_param("image_boot", params.get("stg_image_boot", "no"))
    stg_params += _add_param("drive_format", params.get("stg_drive_format"))
    stg_params += _add_param("drive_cache", params.get("stg_drive_cache"))
    if params.get("stg_assign_index") != "no":
        # Assume 0 and 1 are already occupied (hd0 and cdrom)
        stg_params += _add_param("drive_index", 'range(2,n)')
    param_matrix = {}

    stg_params = stg_params.split(' ')
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    rerange = []
    has_name = False
    for i in range(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        if cmd == "image_name":
            has_name = True
        if _RE_RANGE1.match(parm):
            parm = _range(parm)
            if parm is False:
                test.error("Incorrect cfg: stg_params %s looks "
                           "like range(..) but doesn't contain "
                           "numbers." % cmd)
            param_matrix[cmd] = parm
            if isinstance(parm, str):
                # When we know the stg_image_num, substitute it.
                rerange.append(cmd)
                continue
        else:
            # ',' separated list of values
            parm = parm.split(',')
            j = 0
            while j < len(parm) - 1:
                if parm[j][-1] == '\\':
                    parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
                j += 1
            param_matrix[cmd] = parm
        stg_image_num = max(stg_image_num, len(parm))

    stg_image_num = int(params.get('stg_image_num', stg_image_num))
    for cmd in rerange:
        param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num)
    # param_table* are for pretty print of param_matrix
    param_table = []
    param_table_header = ['name']
    if not has_name:
        param_table_header.append('image_name')
    for _ in param_matrix:
        param_table_header.append(_)

    stg_image_name = params.get('stg_image_name', 'images/%s')
    for i in range(stg_image_num):
        name = "stg%d" % i
        params['images'] += " %s" % name
        param_table.append([])
        param_table[-1].append(name)
        if not has_name:
            params["image_name_%s" % name] = stg_image_name % name
            param_table[-1].append(params.get("image_name_%s" % name))
        for parm in param_matrix.items():
            params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])])
            param_table[-1].append(params.get('%s_%s' % (parm[0], name)))

    if params.get("multi_disk_params_only") == 'yes':
        # Only print the test param_matrix and finish
        logging.info('Newly added disks:\n%s',
                     astring.tabular_output(param_table, param_table_header))
        return

    # Always recreate VMs and disks
    error_context.context("Prepare new disk images", logging.info)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        env_process.process_images(env_process.preprocess_image, test,
                                   vm_params)

    error_context.context("Start the guest with those disks", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.create(timeout=max(10, stg_image_num), params=params)
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    n_repeat = int(params.get("n_repeat", "1"))
    file_system = [_.strip() for _ in params["file_system"].split()]
    cmd_timeout = float(params.get("cmd_timeout", 360))
    black_list = params["black_list"].split()
    drive_letters = int(params.get("drive_letters", "26"))
    stg_image_size = params["stg_image_size"]
    dd_test = params.get("dd_test", "no")
    pre_command = params.get("pre_command", "")
    labeltype = params.get("labeltype", "gpt")
    iozone_target_num = int(params.get('iozone_target_num', '5'))
    iozone_options = params.get('iozone_options')
    iozone_timeout = float(params.get('iozone_timeout', '7200'))

    have_qtree = True
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        have_qtree = False

    if (params.get("check_guest_proc_scsi") == "yes") and have_qtree:
        error_context.context("Verifying qtree vs. test params")
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info_block())
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(
            session.cmd_output('cat /proc/scsi/scsi'))
        err += tmp1 + tmp2

        if err:
            test.fail("%s errors occurred while verifying qtree vs."
                      " params" % err)
        if params.get('multi_disk_only_qtree') == 'yes':
            return
    try:
        err_msg = "Set disks num: %d" % stg_image_num
        err_msg += ", Get disks num in guest: %d"
        ostype = params["os_type"]
        if ostype == "windows":
            error_context.context(
                "Get the windows disk indexes to be formatted",
                logging.info)
            disks = utils_disk.get_windows_disks_index(session, stg_image_size)
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
            if len(disks) > drive_letters:
                black_list.extend(utils_misc.get_winutils_vol(session))
                disks = random.sample(disks, drive_letters - len(black_list))
            error_context.context(
                "Clear readonly for all disks and online "
                "them in windows guest.", logging.info)
            if not utils_disk.update_windows_disk_attributes(session, disks):
                test.fail("Failed to update windows disk attributes.")
            dd_test = "no"
        else:
            error_context.context("Get the linux disks to be "
                                  "formatted", logging.info)
            disks = sorted(utils_disk.get_linux_disks(session).keys())
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
    except Exception:
        _do_post_cmd(session)
        raise
    try:
        if iozone_options:
            iozone = generate_instance(params, session, 'iozone')
            random.shuffle(disks)
        for i in range(n_repeat):
            logging.info("iterations: %s", (i + 1))
            for n, disk in enumerate(disks):
                error_context.context("Format disk in guest: '%s'" % disk,
                                      logging.info)
                # Randomly select one file system from file_system
                index = random.randint(0, (len(file_system) - 1))
                fstype = file_system[index].strip()
                partitions = utils_disk.configure_empty_disk(
                    session,
                    disk,
                    stg_image_size,
                    ostype,
                    fstype=fstype,
                    labeltype=labeltype)
                if not partitions:
                    test.fail("Fail to format disks.")
                cmd_list = params["cmd_list"]
                for partition in partitions:
                    orig_partition = partition
                    if "/" not in partition:
                        partition += ":"
                    else:
                        partition = partition.split("/")[-1]
                    error_context.context(
                        "Copy file into / out of partition:"
                        " %s..." % partition, logging.info)
                    for cmd_l in cmd_list.split():
                        cmd = params.get(cmd_l)
                        if cmd:
                            session.cmd(cmd % partition, timeout=cmd_timeout)
                    cmd = params["compare_command"]
                    key_word = params["check_result_key_word"]
                    output = session.cmd_output(cmd)
                    if iozone_options and n < iozone_target_num:
                        iozone.run(iozone_options.format(orig_partition),
                                   iozone_timeout)
                    if key_word not in output:
                        test.fail("Files on guest os root fs and disk differ")
                    if dd_test != "no":
                        error_context.context(
                            "dd test on partition: %s..." % partition,
                            logging.info)
                        status, output = session.cmd_status_output(
                            dd_test % (partition, partition),
                            timeout=cmd_timeout)
                        if status != 0:
                            test.fail("dd test fail: %s" % output)
                    # When multiple SCSI disks are simulated by scsi_debug,
                    # they can be viewed as multiple paths to the same
                    # storage device, so umount the partition before
                    # operating on the next disk to avoid corrupting the
                    # filesystem (xfs integrity check errors).
                    if ostype == "linux" and "scsi_debug add_host" in pre_command:
                        status, output = session.cmd_status_output(
                            "umount /dev/%s" % partition, timeout=cmd_timeout)
                        if status != 0:
                            test.fail("Failed to umount partition '%s': %s" %
                                      (partition, output))
            need_reboot = params.get("need_reboot", "no")
            need_shutdown = params.get("need_shutdown", "no")
            if need_reboot == "yes":
                error_context.context("Rebooting guest ...", logging.info)
                session = vm.reboot(session=session, timeout=login_timeout)
            if need_shutdown == "yes":
                error_context.context("Shutting down guest ...", logging.info)
                vm.graceful_shutdown(timeout=login_timeout)
                if vm.is_alive():
                    test.fail("Fail to shut down guest.")
                error_context.context("Start the guest again.", logging.info)
                vm = env.get_vm(params["main_vm"])
                vm.create(params=params)
                session = vm.wait_for_login(timeout=login_timeout)
            error_context.context("Delete partitions in guest.", logging.info)
            for disk in disks:
                utils_disk.clean_partition(session, disk, ostype)
    finally:
        if iozone_options:
            iozone.clean()
        _do_post_cmd(session)
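
# A minimal sketch (illustrative only) of how the multi-disk test above
# fans param_matrix out into per-image params: every stgN image takes the
# i-th value of each list, wrapping around with modulo when a list is
# shorter than stg_image_num. The params dict here is a plain stand-in
# for the avocado-vt Params object.
param_matrix = {
    'image_size': ['1G', '2G'],
    'image_format': ['qcow2'],
}
stg_image_num = 3
params = {'images': 'image1'}

for i in range(stg_image_num):
    name = "stg%d" % i
    params['images'] += " %s" % name
    for key, values in param_matrix.items():
        params['%s_%s' % (key, name)] = str(values[i % len(values)])

print(params['images'])            # image1 stg0 stg1 stg2
print(params['image_size_stg2'])   # 1G (wrapped around)
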
Example #17
def run(test, params, env):
    """
    Special hardware test case.
    FC host: ibm-x3650m4-05.lab.eng.pek2.redhat.com
    Disk serial name: scsi-360050763008084e6e0000000000001a4
    # multipath -ll
    mpathb (360050763008084e6e0000000000001a8) dm-4 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 2:0:1:0 sde 8:64 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 2:0:0:0 sdd 8:48 active ready running
    mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 1:0:1:0 sdc 8:32 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 1:0:0:0 sdb 8:16 active ready running
    Customer Bug ID: 1741937  1673546

    Test if VM paused/resume when fc storage offline/online.

    1) pass-through /dev/mapper/mpatha
    2) install guest on pass-through disk
    3) Disconnect the storage during installation
    4) Check if VM status is 'paused'
    5) Connect the storage and wait until it is accessible again
    6) resume the vm
    7) Check if VM status is 'running'
    8) installation completed successfully
    9) re-pass-through /dev/mapper/mpatha
    10) fio test on pass-through disk
    11) Disconnect any path of multipath during fio testing
    12) Check if VM status is 'paused'
    13) Connect the storage and wait until it is accessible again
    14) resume the vm
    15) fio testing completed successfully

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def check_vm_status(vm, status):
        """
        Check if VM has the given status or not.

        :param vm: VM object.
        :param status: String with desired status.
        :return: True if VM status matches our desired status.
        :return: False if VM status does not match our desired status.
        """
        current_status = None
        try:
            current_status = vm.monitor.get_status()
            vm.verify_status(status)
        except (virt_vm.VMStatusError, qemu_monitor.MonitorLockError):
            logging.info("Failed to check vm status, it is '%s' "
                         "instead of '%s'" % (current_status, status))
            return False
        except Exception as e:
            logging.info("Failed to check vm status: %s" % six.text_type(e))
            logging.info("vm status is '%s' instead of"
                         " '%s'" % (current_status, status))
            return False
        else:
            logging.info("Check vm status successfully. It is '%s'" % status)
            return True

    def get_multipath_disks(mpath_name="mpatha"):
        """
        Get all disks of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a list of disks on success; otherwise the test fails
        """
        logging.info("get_multipath_disks:" + mpath_name)
        disks = []
        disk_str = ["active ready running", "active faulty offline",
                    "failed faulty offline", "failed ready running"]
        outputs = process.run("multipath -ll " + mpath_name,
                              shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            if any(state in line for state in disk_str):
                disks.append(line.split()[-5])
        if not disks:
            test.fail("Failed to get disks by 'multipath -ll'")
        else:
            return disks

    def get_multipath_disks_status(mpath_name="mpatha"):
        """
        Get status of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16  active ready running

        :param mpath_name: multi-path name.
        :return: a dict. e.g. {"sdb": "running", "sdc": "running"}
        """
        disks = get_multipath_disks(mpath_name)
        disks_status = {}
        outputs = process.run("multipath -ll " + mpath_name,
                              shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            for i in range(len(disks)):
                if disks[i] in line:
                    disks_status[disks[i]] = line.strip().split()[-1]
                    break
        if not disks_status or len(disks_status) != len(disks):
            logging.info("Failed to get disks status by 'multipath -ll'")
            return {}
        else:
            return disks_status

    def compare_onepath_status(status, disk):
        """
        Compare whether the given path's status equals the given status.

        :param status: the expected state of the disk.
        :param disk: disk kname.
        :return: True, if equal to the given status, or False
        """
        status_dict = get_multipath_disks_status(mpath_name)
        logging.debug("compare_onepath_status disk: %s, %s, %s",
                      disk, status_dict, status)
        return disk in status_dict and status_dict[disk] == status

    def compare_multipath_status(status, mpath_name="mpatha"):
        """
        Compare whether all paths' status equals the given status.
        This function only covers the cases where all paths are running
        or all are offline.

        :param status: the state of disks.
        :param mpath_name: multi-path name.
        :return: True, if equal to the given status or False
        """
        status_dict = get_multipath_disks_status(mpath_name)
        logging.debug("compare_multipath_status mpath_name: %s, %s, %s",
                      mpath_name, status_dict, status)
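        # All paths share one status exactly when the set of status values
        # collapses to a single element; then check it is the wanted one.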
        return (len(set(status_dict.values())) == 1
                and status in status_dict.values())

    def set_disk_status_to_online_offline(disk, status):
        """
        Set the disk state to online/offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disk: disk name.
        :param status: the state of disk.
        :return: None
        """
        error_context.context("Set disk '%s' to status '%s'." % (disk, status),
                              logging.info)
        process.run("echo %s >  /sys/block/%s/device/state" % (status, disk),
                    shell=True)

    def set_multipath_disks_status(disks, status):
        """
        Set multiple paths to the same status: all disks online or offline.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32  active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 failed faulty offline

        :param disks: disk list.
        :param status: the state of disk. online/offline
        :return: None
        """
        for disk in disks:
            set_disk_status_to_online_offline(disk, status)
            time.sleep(2)
        if len(disks) == 1:
            wait.wait_for(lambda: compare_onepath_status(status, disks[0]),
                          first=wait_time,
                          step=3,
                          timeout=60)
        else:
            wait.wait_for(lambda: compare_multipath_status(status),
                          first=wait_time,
                          step=3,
                          timeout=60)

    def get_lvm_dm_name(blkdevs_used):
        """
        Get dm name for lvm. such as rhel_ibm--x3650m4--05-root in below
        NAME                           MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT
        sda                              8:0    0 278.9G  0 disk
        ├─sda1                           8:1    0     1G  0 part  /boot
        └─sda2                           8:2    0 277.9G  0 part
            ├─rhel_ibm--x3650m4--05-root 253:0    0    50G  0 lvm   /
            ├─rhel_ibm--x3650m4--05-swap 253:1    0  15.7G  0 lvm   [SWAP]
            └─rhel_ibm--x3650m4--05-home 253:2    0 212.2G  0 lvm   /home
        # ls /dev/mapper/* -l
        crw-------. 1 root root 10, 236 Oct 25 02:07 /dev/mapper/control
        lrwxrwxrwx. 1 root root       7 Oct 25 09:26 /dev/mapper/mpatha -> ../dm-3
        lrwxrwxrwx. 1 root root       7 Oct 25 09:26 /dev/mapper/mpatha1 -> ../dm-5
        lrwxrwxrwx. 1 root root       7 Oct 25 09:26 /dev/mapper/mpatha2 -> ../dm-6
        lrwxrwxrwx. 1 root root       7 Oct 25 06:49 /dev/mapper/mpathb -> ../dm-4
        lrwxrwxrwx. 1 root root       7 Oct 25 09:17 /dev/mapper/rhel_bootp--73--199--5-home -> ../dm-8
        lrwxrwxrwx. 1 root root       7 Oct 25 09:17 /dev/mapper/rhel_bootp--73--199--5-root -> ../dm-9
        lrwxrwxrwx. 1 root root       7 Oct 25 09:17 /dev/mapper/rhel_bootp--73--199--5-swap -> ../dm-7
        lrwxrwxrwx. 1 root root       7 Oct 25 02:07 /dev/mapper/rhel_ibm--x3650m4--05-home -> ../dm-2
        lrwxrwxrwx. 1 root root       7 Oct 25 02:07 /dev/mapper/rhel_ibm--x3650m4--05-root -> ../dm-0
        lrwxrwxrwx. 1 root root       7 Oct 25 02:07 /dev/mapper/rhel_ibm--x3650m4--05-swap -> ../dm-1
        -rw-r--r--. 1 root root       0 Oct 25 07:52 /dev/mapper/vg_raid10-lv_home
        # dmsetup info -c -o name,blkdevs_used
        Name                        BlkDevNamesUsed
        rhel_bootp--73--199--5-home dm-6
        mpathb                      sdd,sde
        mpatha                      sdb,sdc
        mpatha2                     dm-3
        rhel_bootp--73--199--5-swap dm-6
        rhel_bootp--73--199--5-root dm-6
        mpatha1                     dm-3
        rhel_ibm--x3650m4--05-home  sda2
        rhel_ibm--x3650m4--05-swap  sda2
        rhel_ibm--x3650m4--05-root  sda2

        :param blkdevs_used: block name, e.g. sda2
        :return: a list contains all dm name for one blkdev
        """
        dm_list = []
        dm_name = None
        logging.info("Get dm name for '%s'", blkdevs_used)
        output = process.run("ls /dev/mapper/* -l", shell=True).stdout.decode()
        for line in output.splitlines():
            if blkdevs_used in line:
                dm_name = line.split("/")[-1]
                break
        if dm_name is None:
            return dm_list
        output = process.run("dmsetup info -c -o name,blkdevs_used",
                             shell=True).stdout.decode()
        for line in output.splitlines():
            if dm_name == line.split()[-1]:
                dm_list.append(line.split()[0])
        return dm_list

    def delete_lvm_on_multipath(mpath_name="mpatha"):
        """
        Delete lvm on the given multipath.

        :param mpath_name: multi-path name.
        :return: None
        """
        output = process.run("pvscan", shell=True).stdout.decode()
        pv_list = []
        vg_list = []
        lv_list = []
        for line in output.splitlines():
            if mpath_name in line:
                if line.split()[1] not in pv_list:
                    pv_list.append(line.split()[1])
                if line.split()[3] not in vg_list:
                    vg_list.append(line.split()[3])
        output = process.run("lvscan", shell=True).stdout.decode()
        for line in output.splitlines():
            for vg in vg_list:
                lv = "/dev/%s/" % vg
                if lv in line and line.split("'")[1] not in lv_list:
                    lv_list.append(line.split("'")[1])
        logging.info("pv list: %s" % pv_list)
        logging.info("vg list: %s" % vg_list)
        logging.info("lv list: %s" % lv_list)
        for lv in lv_list:
            logging.info("Remove lvm '%s'." % lv)
            process.run("lvremove -y %s" % lv, ignore_status=True, shell=True)
        for vg in vg_list:
            logging.info("Remove vg '%s'." % vg)
            process.run("vgremove -y %s" % vg, ignore_status=True, shell=True)
        for pv in pv_list:
            pv_name = pv.split("/")[-1]
            for dm in get_lvm_dm_name(pv_name):
                process.run("dmsetup remove %s" % dm,
                            ignore_status=True,
                            shell=True)
            logging.info("Remove pv '%s'." % pv)
            process.run("pvremove -y %s" % pv, ignore_status=True, shell=True)

    def delete_partition_on_host(did):
        """
        Delete partitions on the given disk.

        :param did: disk kname, e.g. 'sdb' or 'nvme0n1'
        :return: None
        """
        list_disk_cmd = "lsblk -o KNAME,MOUNTPOINT"
        output = process.run(list_disk_cmd, shell=True).stdout.decode()
        regex_str = did + r"\w*(\d+)"
        rm_cmd = 'parted -s "/dev/%s" rm %s'
        for line in output.splitlines():
            partition = re.findall(regex_str, line, re.I | re.M)
            if partition:
                if "/" in line.split()[-1]:
                    process.run("umount %s" % line.split()[-1], shell=True)
        list_partition_number = "parted -s /dev/%s print|awk '/^ / {print $1}'"
        partition_numbers = process.run(list_partition_number % did,
                                        ignore_status=True,
                                        shell=True).stdout.decode()
        ignore_err_msg = "unrecognised disk label"
        if ignore_err_msg in partition_numbers:
            logging.info("no partition to delete on %s" % did)
        else:
            partition_numbers = partition_numbers.splitlines()
            for number in partition_numbers:
                logging.info("remove partition %s on %s" % (number, did))
                process.run(rm_cmd % (did, number),
                            ignore_status=True,
                            shell=True)
            process.run("partprobe /dev/%s" % did, shell=True)

    def delete_multipath_partition_on_host(mpath_name="mpatha"):
        """
        Delete partitions on the given multipath.

        :param mpath_name: multi-path name.
        :return: None
        """
        output = process.run("lsblk", shell=True).stdout.decode()
        mpath_dict = {}
        pattern = r'%s(\d+)' % mpath_name
        rm_cmd = 'parted -s "/dev/mapper/%s" rm %s'
        for line in output.splitlines():
            for part_num in re.findall(pattern, line, re.I | re.M):
                if part_num not in mpath_dict.keys():
                    mpath_dict[part_num] = line.split()[-1]
        for key, value in mpath_dict.items():
            if "/" in value:
                process.run("umount %s" % value, shell=True)
            logging.info("remove partition %s on %s" % (key, mpath_name))
            process.run(rm_cmd % (mpath_name, key),
                        ignore_status=True,
                        shell=True)
        output = process.run("dmsetup ls", shell=True).stdout.decode()
        for line in output.splitlines():
            for key in mpath_dict.keys():
                if (mpath_name + key) in line:
                    process.run("dmsetup remove %s%s" % (mpath_name, key),
                                ignore_status=True,
                                shell=True)
        process.run("partprobe /dev/mapper/%s" % mpath_name, shell=True)

    def clean_partition_on_host(mpath_name="mpatha"):
        """
        Delete partitions on multi-path disks.

        :param mpath_name: multi-path name.
        :return: None
        """
        delete_multipath_partition_on_host(mpath_name)
        disks = get_multipath_disks(mpath_name)
        for disk in disks:
            delete_partition_on_host(disk)

    def _run_fio_background(session, filename):
        """run fio testing by thread"""

        logging.info("Start fio in background.")
        cmd = fio_multipath.cfg.fio_path + params['fio_options'] % filename
        logging.info(cmd)
        session.cmdline(cmd)


    def _get_windows_disks_index(session, image_size):
        """
        Get the index of every disk shown by 'diskpart list disk',
        except for the system disk.
        In diskpart, a disk smaller than 8GB is displayed in MB;
        otherwise it is displayed in GB.

        :param session: session object to guest.
        :param image_size: image size. e.g. 40M
        :return: a list with all disks index except for system disk.
        """
        disk = "disk_" + ''.join(
            random.sample(string.ascii_letters + string.digits, 4))
        disk_indexs = []
        list_disk_cmd = "echo list disk > " + disk
        list_disk_cmd += " && echo exit >> " + disk
        list_disk_cmd += " && diskpart /s " + disk
        list_disk_cmd += " && del /f " + disk
        disks = session.cmd_output(list_disk_cmd)
        size_type = image_size[-1] + "B"
        if size_type == "MB":
            disk_size = image_size[:-1] + " MB"
        elif size_type == "GB" and int(image_size[:-1]) < 8:
            disk_size = str(int(image_size[:-1]) * 1024) + " MB"
        else:
            disk_size = image_size[:-1] + " GB"
        regex_str = r'Disk (\d+).*?%s' % disk_size
        for disk in disks.splitlines():
            if disk.startswith("  Disk"):
                o = re.findall(regex_str, disk, re.I | re.M)
                if o:
                    disk_indexs.append(o[0])
        return disk_indexs

    def resume_vm_plus(vm, timeout=120):
        """resume vm when it is paused.

        :param vm: VM object.
        :param timeout: how long to wait for the resume to succeed
        :return: True or None.
                 If resumed successfully return True, otherwise None
        """
        logging.info("Try to resume vm within %s seconds.", timeout)
        vm_status = None
        try:
            vm_status = vm.resume(timeout=timeout)
        except Exception as e:
            logging.error("Failed to resume vm: %s", six.text_type(e))
        return vm_status

    def resume_vm(vm, n_repeat=2):
        """resume vm when it is paused.

        :param vm: VM object.
        :param n_repeat: re-try times, if it is still paused after resume
        :return: True or False.
                 If resume successfully return True or return False
        """
        for i in range(1, n_repeat + 1):
            logging.info("Try to resume vm %s time(s)" % i)
            try:
                vm.resume()
                time.sleep(wait_time * 15)
            except Exception as e:
                logging.error("Failed to resume vm: %s" % six.text_type(e))
            finally:
                if check_vm_status(vm, "running"):
                    return True
                if vm.is_paused() and i == n_repeat:
                    return False

    error_context.context("Get FC host name:", logging.info)
    hostname = process.run("hostname", shell=True).stdout.decode().strip()
    if hostname != params["special_host"]:
        test.cancel("The special host is not '%s', cancel the test." %
                    params["special_host"])
    error_context.context("Get FC disk serial name:", logging.info)
    outputs = process.run("multipath -ll",
                          shell=True).stdout.decode().splitlines()
    stg_serial_name = params["stg_serial_name"]
    image_name_stg = params["image_name_stg"]
    mpath_name = image_name_stg.split("/")[-1]
    for output in outputs:
        if stg_serial_name in output and mpath_name in output:
            break
    else:
        test.cancel("The special disk is not '%s', cancel the test." %
                    stg_serial_name)
    wait_time = float(params.get("sub_test_wait_time", 0))
    repeat_times = int(params.get("repeat_times", 2))
    multi_disks = get_multipath_disks(mpath_name)
    error_context.context(
        "Get all disks for '%s': %s" % (mpath_name, multi_disks), logging.info)
    error_context.context(
        "Verify all paths are running for %s before "
        "starting the vm." % mpath_name, logging.info)
    if compare_multipath_status("running", mpath_name):
        logging.info("All paths are running for %s." % mpath_name)
    else:
        logging.info("Not all paths are running for %s, set "
                     "them to running." % mpath_name)
        set_multipath_disks_status(multi_disks, "running")
    error_context.context(
        "Delete lvm on multipath disks on host "
        "before testing.", logging.info)
    delete_lvm_on_multipath(mpath_name)
    error_context.context("Delete partitions on host before testing.",
                          logging.info)
    clean_partition_on_host(mpath_name)
    vm = env.get_vm(params["main_vm"])
    try:
        if params.get("need_install") == "yes":
            error_context.context("Install guest on passthrough disk:",
                                  logging.info)
            args = (test, params, env)
            bg = utils_misc.InterruptedThread(
                utils_test.run_virt_sub_test, args,
                {"sub_type": "unattended_install"})
            bg.start()
            utils_misc.wait_for(bg.is_alive, timeout=10)
            time.sleep(random.uniform(60, 180))
        else:
            vm.create(params=params)
            session = vm.wait_for_login(
                timeout=int(params.get("timeout", 240)))
            fio_multipath = generate_instance(params, vm, 'fio')
            image_size_stg = params["image_size_stg"]
            image_num_stg = int(params["image_num_stg"])
            os_type = params["os_type"]
    except Exception as e:
        test.error("failed to create VM: %s" % six.text_type(e))
    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        if vm.is_paused():
            vm.resume()
        vm.verify_status("running")
        if "fio_multipath" in locals().keys():
            if os_type == "windows":
                error_context.context(
                    "Get the windows disk indexes to be formatted",
                    logging.info)
                disks = _get_windows_disks_index(session, image_size_stg)
                if len(disks) != image_num_stg:
                    test.fail(
                        "Failed to list all disks by image size. The expected "
                        "is %s, actual is %s" % (image_num_stg, len(disks)))
                error_context.context(
                    "Clear readonly for all disks and online "
                    "them in windows guest.", logging.info)
                if not utils_disk.update_windows_disk_attributes(
                        session, disks):
                    test.fail("Failed to update windows disk attributes.")
            else:
                error_context.context("Get the linux disks to be "
                                      "formatted", logging.info)
                disks = sorted(utils_disk.get_linux_disks(session).keys())
                if len(disks) != image_num_stg:
                    test.fail(
                        "Failed to list all disks by image size. The expected "
                        "is %s, actual is %s" % (image_num_stg, len(disks)))
            file_system = [_.strip() for _ in params["file_system"].split()]
            labeltype = params.get("labeltype", "gpt")
            for n, disk in enumerate(disks):
                error_context.context("Format disk in guest: '%s'" % disk,
                                      logging.info)
                # Randomly select one file system from file_system
                index = random.randint(0, (len(file_system) - 1))
                fstype = file_system[index].strip()
                partitions = utils_disk.configure_empty_disk(
                    session,
                    disk,
                    image_size_stg,
                    os_type,
                    fstype=fstype,
                    labeltype=labeltype)
                if not partitions:
                    test.fail("Fail to format disks.")
            fio_file_name = params["fio_file_name"] % partitions[0]
            _run_fio_background(session, fio_file_name)
        for disk in multi_disks:
            error_context.context(
                "Disable disk %s during guest running" % disk, logging.info)
            set_multipath_disks_status([disk], "offline")
            time.sleep(wait_time * 15)
            if vm.is_paused():
                logging.info("vm is paused, will resume it.")
                if not resume_vm(vm, repeat_times):
                    test.fail("Failed to resume guest after disable one disk")
                logging.info("Has resumed vm already. Then verify it running.")
                if not utils_misc.wait_for(
                        lambda: check_vm_status(vm, "running"), 60):
                    test.fail("Guest is not running after disable one disk")
            error_context.context("Enable disk %s during guest running" % disk,
                                  logging.info)
            set_multipath_disks_status([disk], "running")
            time.sleep(wait_time * 15)
        error_context.context(
            "Disable multipath '%s' during guest "
            "running." % mpath_name, logging.info)
        set_multipath_disks_status(multi_disks, "offline")
        time.sleep(wait_time * 15)
        error_context.context("Check if VM status is 'paused'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "paused"), 120):
            test.fail("Guest is not paused after all disks offline")
        error_context.context(
            "Re-connect fc storage, wait until the "
            "storage is accessible again", logging.info)
        set_multipath_disks_status(multi_disks, "running")
        time.sleep(wait_time * 15)
        error_context.context("vm is paused, resume it.", logging.info)
        if not resume_vm(vm, repeat_times):
            test.fail("Failed to resume guest after enable multipath.")
        logging.info("Has resumed vm already. Then verify it running.")
        time.sleep(wait_time * 15)
        error_context.context("Check if VM status is 'running'", logging.info)
        if not utils_misc.wait_for(lambda: check_vm_status(vm, "running"),
                                   120):
            test.fail("Guest is not running after all disks online")
        if "bg" in locals().keys() and bg.is_alive and not vm.is_paused():
            bg.join()
        error_context.context(
            "Verify Host and guest kernel no error "
            "and call trace", logging.info)
        vm.verify_kernel_crash()
    finally:
        logging.info("Finally, clean environment.")
Example #18
File: dd_test.py Project: pezhang/tp-qemu
def run(test, params, env):
    """
    Executes dd with the given parameters and checks the exit status and output

    Test steps:
    1) Wait for the guest to boot up.
    2) Run a dd command in the guest with specific params (e.g. oflag, bs
       and so on).
    3) Check the command exit status and output.
    """
    def _get_file(filename, select, test=test):
        """ Picks the actual file based on select value """
        if filename == "NULL":
            return "/dev/null"
        elif filename == "ZERO":
            return "/dev/zero"
        elif filename == "RANDOM":
            return "/dev/random"
        elif filename == "URANDOM":
            return "/dev/urandom"
        elif filename in params.objects("images"):
            drive_id = params["blk_extra_params_%s" % filename].split("=")[1]
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if drive_path:
                return drive_path
            test.error("Failed to get '%s' drive path" % filename)
        else:
            # get all matching filenames
            try:
                disks = sorted(session.cmd("ls -1d %s" % filename).split('\n'))
            except aexpect.ShellCmdError:   # No matching file (creating new?)
                disks = [filename]
            if disks[-1] == '':
                disks = disks[:-1]
            try:
                return disks[select]
            except IndexError:
                err = ("Incorrect cfg: dd_select out of the range (disks=%s,"
                       " select=%s)" % (disks, select))
                logging.error(err)
                test.error(err)

    vm = env.get_vm(params['main_vm'])
    timeout = int(params.get("login_timeout", 360))

    error_context.context("Wait guest boot up", logging.info)
    session = vm.wait_for_login(timeout=timeout)

    dd_keys = ['dd_if', 'dd_of', 'dd_bs', 'dd_count', 'dd_iflag',
               'dd_oflag', 'dd_skip', 'dd_seek']

    dd_params = {key: params.get(key, None) for key in dd_keys}
    if dd_params['dd_bs'] is None:
        dd_params['dd_bs'] = '512'
    dd_params['dd_bs'] = dd_params['dd_bs'].split()
    bs_count = len(dd_params['dd_bs'])
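    # Each bs value gets its own dd command; when dd_if/dd_of is an image,
    # one partition per bs value is created on it further below.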

    dd_timeout = int(params.get("dd_timeout", 180))
    dd_output = params.get("dd_output", "")
    dd_stat = int(params.get("dd_stat", 0))

    dev_partitioned = []
    for arg in ['dd_if', 'dd_of']:
        filename = dd_params[arg]
        path = _get_file(filename,
                         int(params.get('%s_select' % arg, '-1')))
        if (bs_count > 1
                and filename in params.objects('images')):
            psize = float(
                utils_numeric.normalize_data_size(
                    params.get("partition_size", '2G')
                    )
                )
            start = 0.0
            dev_id = os.path.split(path)[-1]
            dev_partitioned.append(dev_id)

            utils_disk.create_partition_table_linux(session, dev_id, 'gpt')
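            # one partition per bs value, laid out back to back on the disk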
            for i in range(bs_count):
                utils_disk.create_partition_linux(session, dev_id,
                                                  '%fM' % psize,
                                                  '%fM' % start)
                start += psize

            disks = utils_disk.get_linux_disks(session, partition=True)
            partitions = [key for key in disks if
                          re.match(r'%s\d+$' % dev_id, key)]
            partitions.sort()
            dd_params[arg] = [path.replace(dev_id, part)
                              for part in partitions]
        else:
            dd_params[arg] = [path]

    if bs_count > 1 and not dev_partitioned:
        test.error('with multiple bs, either dd_if or '
                   'dd_of must be a block device')

    dd_cmd = ['dd']
    for key in dd_keys:
        value = dd_params[key]
        if value is None:
            continue
        arg = key.split('_')[-1]
        if key in ['dd_if', 'dd_of', 'dd_bs']:
            part = '%s=%s' % (arg, '{}')
        else:
            part = '%s=%s' % (arg, value)
        dd_cmd.append(part)
    dd_cmd = ' '.join(dd_cmd)

    remaining = [dd_params[key] for key in ['dd_if', 'dd_of', 'dd_bs']]
    if len(dd_params['dd_if']) != bs_count:
        fillvalue = dd_params['dd_if'][-1]
    else:
        fillvalue = dd_params['dd_of'][-1]
    cmd = [dd_cmd.format(*t) for t in
           zip_longest(*remaining, fillvalue=fillvalue)]
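    # zip_longest pads the shorter of dd_if/dd_of with its last entry so that
    # every bs value yields a complete dd command; joining with ' & ' runs the
    # commands in parallel in the guest shell.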
    cmd = ' & '.join(cmd)
    logging.info("Using '%s' cmd", cmd)

    try:
        error_context.context("Execute dd in guest", logging.info)
        try:
            (stat, out) = session.cmd_status_output(cmd, timeout=dd_timeout)
        except aexpect.ShellTimeoutError:
            err = ("dd command timed-out (cmd='%s', timeout=%d)"
                   % (cmd, dd_timeout))
            test.fail(err)
        except aexpect.ShellCmdError as details:
            stat = details.status
            out = details.output

        error_context.context("Check command exit status and output",
                              logging.info)
        logging.debug("Returned dd_status: %s\nReturned output:\n%s",
                      stat, out)
        if stat != dd_stat:
            err = ("Return code doesn't match (expected=%s, actual=%s)\n"
                   "Output:\n%s" % (dd_stat, stat, out))
            test.fail(err)
        if dd_output not in out:
            err = ("Output doesn't match:\nExpected:\n%s\nActual:\n%s"
                   % (dd_output, out))
            test.fail(err)
        logging.info("dd test succeeded.")
    finally:
        for dev_id in dev_partitioned:
            utils_disk.clean_partition_linux(session, dev_id)
        session.close()
Example #19
0
def run(test, params, env):
    """
    Test pure checkpoint commands
    """

    def prepare_checkpoints(disk="vdb", num=1, cp_prefix="test_checkpoint_"):
        """
        Create checkpoints for specific disk

        :param disk: The disk to create checkpoint.
        :param num: How many checkpoints to be created
        :param cp_prefix: The prefix to name the checkpoint.
        """
        option_pattern = ("{0} --diskspec vda,checkpoint=no "
                          "--diskspec {1},checkpoint=bitmap,bitmap={0}")
        for i in range(num):
            # create checkpoints
            checkpoint_name = cp_prefix + str(i)
            options = option_pattern.format(checkpoint_name, disk)
            virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
            current_checkpoints.append(checkpoint_name)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    checkpoint_cmd = params.get("checkpoint_cmd")
    cmd_flag = params.get("flag")
    required_checkpoints = int(params.get("required_checkpoints", 0))
    test_disk_size = params.get("test_disk_size", "100M")
    test_disk_target = params.get("test_disk_target", "vdb")
    status_error = "yes" == params.get("status_error")
    tmp_dir = data_dir.get_tmp_dir()
    current_checkpoints = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Prepare the disk to be used.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(test_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, test_disk_size,
                                  "qcow2")
        disk_params = {"device_type": "disk",
                       "type_name": "file",
                       "driver_type": "qcow2",
                       "target_dev": test_disk_target,
                       "source_file": disk_path}
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml,
                            flagstr="--config", **virsh_dargs)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if required_checkpoints > 0:
            prepare_checkpoints(test_disk_target, required_checkpoints)
        if checkpoint_cmd == "checkpoint-create":
            if not current_checkpoints:
                test.fail("No existing checkpoints prepared.")
            if "--redefine" in cmd_flag:
                no_domain = "yes" == params.get("no_domain")
                extra_flag = params.get("extra_flag")
                image_with_bitmap = "yes" == params.get("image_with_bitmap")
                cp_dumpxml_options = ""
                if no_domain:
                    cp_dumpxml_options = "--no-domain"
                    if libvirt_version.version_compare(6, 6, 0):
                        # libvirt-6.6.0-9.el8 starts to allow redefine VM
                        # backup checkpoint without the domain XML (bz1901830)
                        status_error = False
                checkpoint_redef = current_checkpoints[0]
                cp_xml = checkpoint_xml.CheckpointXML.new_from_checkpoint_dumpxml(
                        vm_name, checkpoint_redef, cp_dumpxml_options)
                logging.debug("Checkpoint XML to be redefined is: %s", cp_xml)
                xml_file = cp_xml.xml
                virsh.checkpoint_delete(vm_name, checkpoint_redef,
                                        "--metadata", **virsh_dargs)
                cmd_options = xml_file + " " + cmd_flag
                if extra_flag:
                    cmd_options += " " + extra_flag
                    if "--redefine-validate" in extra_flag:
                        if not libvirt_version.version_compare(6, 6, 0):
                            test.cancel("--redefine-validate not supported in "
                                        "current libvirt versoin.")
                        if not image_with_bitmap:
                            status_error = True
                            # replace vdb's image with a new qcow2 file to make sure
                            # the image has no block dirty bitmap anymore
                            vm.destroy(gracefully=False)
                            libvirt.create_local_disk("file", disk_path,
                                                      test_disk_size, "qcow2")
                            vm.start()
                            vm.wait_for_login().close()
                result = virsh.checkpoint_create(vm_name, cmd_options, debug=True)
                libvirt.check_exit_status(result, status_error)
        elif checkpoint_cmd == "checkpoint-create-as":
            if "--print-xml" in cmd_flag:
                checkpoint_name = "test_checkpoint_0"
                options = ("{0} --diskspec vda,checkpoint=no --diskspec {1},"
                           "checkpoint=bitmap,bitmap={0} "
                           "--print-xml".format(checkpoint_name, test_disk_target))
                virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
                # The checkpoint should not be created, so we do the following check
                cp_list_result = virsh.checkpoint_list(vm_name, checkpoint_name, debug=True)
                libvirt.check_exit_status(cp_list_result, True)
        elif checkpoint_cmd == "checkpoint-info":
            if len(current_checkpoints) != 3:
                test.fail("We should prepare 3 checkpoints.")
            parent_checkpoint = current_checkpoints[0]
            test_checkpoint = current_checkpoints[1]
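            # for the middle of a 3-checkpoint chain, checkpoint-info should
            # report the first checkpoint as parent, and exactly one child and
            # one descendant (the third checkpoint)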
            stdout = virsh.checkpoint_info(vm_name, test_checkpoint,
                                           **virsh_dargs).stdout_text.strip()
            if (
                    not re.search("domain.*%s" % vm_name, stdout, re.IGNORECASE) or
                    not re.search("parent.*%s" % parent_checkpoint, stdout, re.IGNORECASE) or
                    not re.search("children.*1", stdout, re.IGNORECASE) or
                    not re.search("descendants.*1", stdout, re.IGNORECASE)
               ):
                test.fail("checkpoint-info return inaccurate informaion: %s" % stdout)
        elif checkpoint_cmd == "checkpoint-list":
            logic_error = False
            if not cmd_flag:
                stdout = virsh.checkpoint_list(vm_name,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--parent":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[-1]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 2:
                            logic_error = True
            elif cmd_flag == "--roots":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[0]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 0:
                            logic_error = True
            elif cmd_flag == "--tree":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                lines = stdout.splitlines()
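                # in --tree output each child is indented deeper than its
                # parent, so for a linearly created chain the indentation must
                # strictly increase from one listed checkpoint to the next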
                prev_indent_num = -1
                for line in lines:
                    for checkpoint in current_checkpoints:
                        if checkpoint in line:
                            cur_indent_num = line.rstrip().count(" ")
                            if cur_indent_num <= prev_indent_num:
                                logic_error = True
                                break
                            prev_indent_num = cur_indent_num
            elif cmd_flag == "--name":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                checkpoint_names = stdout.splitlines()
                if not operator.eq(checkpoint_names, current_checkpoints):
                    logic_error = True
            elif cmd_flag == "--topological":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--from":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[2] in stdout
                        or current_checkpoints[1] not in stdout):
                    logic_error = True
            elif cmd_flag == "--descendants":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            elif cmd_flag == "--no-leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] not in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] in stdout):
                    logic_error = True
            elif cmd_flag == "--leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            if logic_error:
                test.fail("checkpoint-list with '%s' gives wrong output"
                          % cmd_flag)
        elif checkpoint_cmd == "checkpoint-dumpxml":
            if "--size" in cmd_flag:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Current libvirt version doesn't support "
                                "'--size' for 'checkpoint-dumpxml'.")
                test_disk = new_disks_in_vm[-1]
                test_disk_path = "/dev/" + test_disk
                test_checkpoint = current_checkpoints[-1]
                dd_count = 1
                dd_bs = "1M"
                dd_seek = "10"
                dd_size = dd_count * 1024 * 1024
                session = vm.wait_for_login()
                utils_disk.dd_data_to_vm_disk(session, test_disk_path,
                                              bs=dd_bs, seek=dd_seek,
                                              count=str(dd_count))
                session.close()
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint + " --size",
                                                  **virsh_dargs).stdout_text.strip()
                re_pattern = ".*%s.*%s.*size.*" % (test_disk, test_checkpoint)
                size_info_line = re.search(re_pattern, stdout)
                if not size_info_line:
                    test.fail("There is no size info for disk:%s checkpoint:%s"
                              % (test_disk, test_checkpoint))
                if str(dd_size) not in size_info_line.group(0):
                    test.fail("Size info incorrect in checkpoint xml, "
                              "'dd_size' is %s, size info in xml is:%s"
                              % (dd_size, size_info_line.group(0)))
            elif "--security-info" in cmd_flag:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                password = "******"
                vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': password})
                vm.start()
                vm.wait_for_login().close()
                prepare_checkpoints()
                test_checkpoint = current_checkpoints[0]
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint,
                                                  **virsh_dargs).stdout_text.strip()
                if password in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info displayed in unsecurity dumpxml.")
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint + " --security-info",
                                                  **virsh_dargs).stdout_text.strip()
                if password not in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info not displayed in security dumpxml.")
        elif checkpoint_cmd == "virsh_list":
            stdout = virsh.dom_list(cmd_flag, **virsh_dargs).stdout_text.strip()
            if ((vm_name in stdout and cmd_flag == "--without-checkpoint") or
                    (vm_name not in stdout and cmd_flag == "--with-checkpoint")):
                test.fail("virsh list with '%s' contains wrong data" % cmd_flag)
        # Make sure vm is running and check checkpoints can be normally deleted
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login().close()
        utils_backup.clean_checkpoints(vm_name, clean_metadata=False,
                                       ignore_status=False)
    finally:
        # Remove checkpoints
        utils_backup.clean_checkpoints(vm_name,
                                       clean_metadata=not vm.is_alive())

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove test image
        if "disk_path" in locals():
            if os.path.exists(disk_path):
                os.remove(disk_path)
Example #20
0
 def _check_disk_partitions_number():
     """ Check the data disk partitions number. """
     disks = utils_disk.get_linux_disks(session, True)
     return len(re.findall(r'%s\d+' % device_name[5:], ' '.join(disks))) == 1
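
A minimal standalone sketch of the same check (assuming an established guest
`session` and a `device_name` such as '/dev/vdb'; the helper name
`count_partitions` is ours, for illustration only):

import re

from virttest import utils_disk


def count_partitions(session, device_name):
    """ Count the partitions of device_name (e.g. '/dev/vdb') in the guest. """
    # passing True makes get_linux_disks list partitions as well as disks
    disks = utils_disk.get_linux_disks(session, True)
    return len(re.findall(r'%s\d+' % device_name[5:], ' '.join(disks)))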
Example #21
0
File: dd_test.py Project: wainersm/tp-qemu
def run(test, params, env):
    """
    Executes dd with defined parameters and checks the return number and output

    Test steps:
    1). wait guest boot up
    2). run dd command in guest with special params(eg. oflag, bs and so on)
    3). check command exit stauts and output
    """
    def _get_file(filename, select, test=test):
        """ Picks the actual file based on select value """
        if filename == "NULL":
            return "/dev/null"
        elif filename == "ZERO":
            return "/dev/zero"
        elif filename == "RANDOM":
            return "/dev/random"
        elif filename == "URANDOM":
            return "/dev/urandom"
        elif filename in params.objects("images"):
            drive_id = params["blk_extra_params_%s" % filename].split("=")[1]
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if drive_path:
                return drive_path
            test.error("Failed to get '%s' drive path" % filename)
        else:
            # get all matching filenames
            try:
                disks = sorted(session.cmd("ls -1d %s" % filename).split('\n'))
            except aexpect.ShellCmdError:  # No matching file (creating new?)
                disks = [filename]
            if disks[-1] == '':
                disks = disks[:-1]
            try:
                return disks[select]
            except IndexError:
                err = ("Incorrect cfg: dd_select out of the range (disks=%s,"
                       " select=%s)" % (disks, select))
                logging.error(err)
                test.error(err)

    vm = env.get_vm(params['main_vm'])
    timeout = int(params.get("login_timeout", 360))

    error_context.context("Wait guest boot up", logging.info)
    session = vm.wait_for_login(timeout=timeout)

    dd_keys = [
        'dd_if', 'dd_of', 'dd_bs', 'dd_count', 'dd_iflag', 'dd_oflag',
        'dd_skip', 'dd_seek'
    ]

    dd_params = {key: params.get(key, None) for key in dd_keys}
    if dd_params['dd_bs'] is None:
        dd_params['dd_bs'] = '512'
    dd_params['dd_bs'] = dd_params['dd_bs'].split()
    bs_count = len(dd_params['dd_bs'])

    dd_timeout = int(params.get("dd_timeout", 180))
    dd_output = params.get("dd_output", "")
    dd_stat = int(params.get("dd_stat", 0))

    dev_partitioned = []
    for arg in ['dd_if', 'dd_of']:
        filename = dd_params[arg]
        path = _get_file(filename, int(params.get('%s_select' % arg, '-1')))
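        # %s_select defaults to -1, i.e. pick the last (highest-sorted) match
        # returned by _get_file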
        if (bs_count > 1 and filename in params.objects('images')):
            psize = float(
                utils_numeric.normalize_data_size(
                    params.get("partition_size", '2G')))
            start = 0.0
            dev_id = os.path.split(path)[-1]
            dev_partitioned.append(dev_id)

            utils_disk.create_partition_table_linux(session, dev_id, 'gpt')
            for i in range(bs_count):
                utils_disk.create_partition_linux(session, dev_id,
                                                  '%fM' % psize, '%fM' % start)
                start += psize

            disks = utils_disk.get_linux_disks(session, partition=True)
            partitions = [
                key for key in disks if re.match(r'%s\d+$' % dev_id, key)
            ]
            partitions.sort()
            dd_params[arg] = [
                path.replace(dev_id, part) for part in partitions
            ]
        else:
            dd_params[arg] = [path]

    if bs_count > 1 and not dev_partitioned:
        test.error('with multiple bs, either dd_if or '
                   'dd_of must be a block device')

    dd_cmd = ['dd']
    for key in dd_keys:
        value = dd_params[key]
        if value is None:
            continue
        arg = key.split('_')[-1]
        if key in ['dd_if', 'dd_of', 'dd_bs']:
            part = '%s=%s' % (arg, '{}')
        else:
            part = '%s=%s' % (arg, value)
        dd_cmd.append(part)
    dd_cmd = ' '.join(dd_cmd)

    remaining = [dd_params[key] for key in ['dd_if', 'dd_of', 'dd_bs']]
    if len(dd_params['dd_if']) != bs_count:
        fillvalue = dd_params['dd_if'][-1]
    else:
        fillvalue = dd_params['dd_of'][-1]
    cmd = [
        dd_cmd.format(*t) for t in zip_longest(*remaining, fillvalue=fillvalue)
    ]
    cmd = ' & '.join(cmd)
    logging.info("Using '%s' cmd", cmd)

    try:
        error_context.context("Execute dd in guest", logging.info)
        try:
            (stat, out) = session.cmd_status_output(cmd, timeout=dd_timeout)
        except aexpect.ShellTimeoutError:
            err = ("dd command timed-out (cmd='%s', timeout=%d)" %
                   (cmd, dd_timeout))
            test.fail(err)
        except aexpect.ShellCmdError as details:
            stat = details.status
            out = details.output

        error_context.context("Check command exit status and output",
                              logging.info)
        logging.debug("Returned dd_status: %s\nReturned output:\n%s", stat,
                      out)
        if stat != dd_stat:
            err = ("Return code doesn't match (expected=%s, actual=%s)\n"
                   "Output:\n%s" % (dd_stat, stat, out))
            test.fail(err)
        if dd_output not in out:
            err = ("Output doesn't match:\nExpected:\n%s\nActual:\n%s" %
                   (dd_output, out))
            test.fail(err)
        logging.info("dd test succeeded.")
    finally:
        for dev_id in dev_partitioned:
            utils_disk.clean_partition_linux(session, dev_id)
        session.close()
Example #22
0
def run(test, params, env):
    """
    Test to install the guest OS on the lvm device which is created
    on an iSCSI target.
    Steps:
        1) Setup iSCSI initiator on local host.
        2) Discovery and login the above iSCSI target.
        3) Send sg_inq to get information on the host.
        4) Boot guest with this lun as a block device as the second
           disk, with scsi=on,format=raw,werror=stop,rerror=stop.
        5) In the guest, sg_inq should show similar information in
           step 3.
        6) Logout iscsi server.
        7) Check the disk info with sg_inq inside guest, should show
           fail information.
        8) Run dd on this disk, the guest should stop.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    def fetch_sg_info(device, session=None):
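        # cmd_sg_inq is expected to wrap 'sg_inq' (a SCSI INQUIRY) against the
        # device; it runs on the host by default, or in the guest via session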
        cmd = params['cmd_sg_inq'] % device
        if session:
            return session.cmd_output(cmd)
        return process.getoutput(cmd, 60, ignore_status=False)

    iscsi = Iscsi.create_iSCSI(params, data_dir.get_data_dir())
    try:
        iscsi.login()
        if not utils_misc.wait_for(lambda: iscsi.get_device_name(), 60):
            test.error('Can not get the iSCSI device.')

        cmd_get_disk_path = params['cmd_get_disk_path']
        disk_path = process.system_output(cmd_get_disk_path, 60,
                                          shell=True).decode()

        host_sg_info = fetch_sg_info(disk_path)
        logging.info('The scsi generic info from host: %s', host_sg_info)

        image_data_tag = params['image_data_tag']
        params['image_name_%s' % image_data_tag] = disk_path
        params['image_size'] = params['emulated_image_size']
        image_params = params.object_params(image_data_tag)
        env_process.preprocess_image(test, image_params, image_data_tag)

        params["start_vm"] = "yes"
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        session = vm.wait_for_login()

        data_disk = '/dev/' + list(get_linux_disks(session).keys()).pop()
        guest_sg_info = fetch_sg_info(data_disk, session)
        logging.info('The scsi generic info from guest: %s', guest_sg_info)

        for info in guest_sg_info.split():
            if info not in host_sg_info:
                test.fail('The guest scsi generic info is not similar to host.')

        iscsi.logout()
        if params['sg_fail_info'] not in fetch_sg_info(data_disk, session):
            test.fail('Fail information not found after logging out of the '
                      'iSCSI server.')

        session.cmd_output(params['cmd_dd'] % data_disk)
        vm_status_paused = params['vm_status_paused']
        if not utils_misc.wait_for(
                lambda: vm.monitor.verify_status(vm_status_paused), 120, step=3):
            test.fail('The vm status is not %s.' % vm_status_paused)
    finally:
        iscsi.delete_target()
Example #23
0
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5 as needed
    7. check the full/incremental backup file data
    """

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. This code
        # should be removed as soon as the feature is enabled by default,
        # which is tracked by bz1799015
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu',
                     'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
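            # the resulting fragment under <domain> looks like:
            #   <qemu:capabilities>
            #     <qemu:add capability='incremental-backup'/>
            #   </qemu:capabilities>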
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")

        # Prepare the disk to be backuped.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = "localhost"
                backup_server_dict["port"] = nbd_tcp_port
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {}
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_file_path = os.path.join(
                            tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file",
                                                      scratch_file_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["file"] = scratch_file_path
                        logging.debug("scratch_params: %s", scratch_params)
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        logging.debug("abcd scratch_blkdev_path:%s",
                                      scratch_blkdev_path)
                        scratch_params["dev"] = scratch_blkdev_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            utils_backup.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs,
                                            dd_seek, dd_count)

            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            if not is_incremental:
                # Do full backup
                if nbd_protocol == "unix":
                    nbd_export = ("nbd+unix:///%s?socket=%s" %
                                  (nbd_export_name, nbd_socket))
                elif nbd_protocol == "tcp":
                    nbd_export = ("nbd://localhost:%s/%s" %
                                  (nbd_tcp_port, nbd_export_name))
                utils_backup.pull_full_backup_to_file(nbd_export,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                nbd_params = {
                    "nbd_protocol": nbd_protocol,
                    "nbd_export": nbd_export_name
                }
                if nbd_protocol == "tcp":
                    nbd_params["nbd_tcp_port"] = nbd_tcp_port
                elif nbd_protocol == "unix":
                    nbd_params["nbd_socket"] = nbd_socket
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
Example #24
0
def run(test, params, env):
    """
    KVM block resize test:

    1) Start guest with data disk or system disk.
    2) Do format disk in guest if needed.
    3) Record md5 of test file on the data disk.
       Enlarge the data disk image from qemu monitor.
    4) Extend data disk partition/file-system in guest.
    5) Verify the data disk size match expected size.
    6) Reboot the guest.
    7) Do iozone test, compare the md5 of test file.
    8) Shrink data disk partition/file-system in guest.
    9) Shrink data disk image from qemu monitor.
    10) Verify the data disk size match expected size.
    11) Reboot the guest.
    12) Do iozone test, compare the md5 of test file.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_disk_size(session, os_type, disk):
        """
        Verify the current block size match with the expected size.
        """
        global current_size
        current_size = utils_disk.get_disk_size(session, os_type, disk)
        accept_ratio = float(params.get("accept_ratio", 0))
        if (current_size <= block_size
                and current_size >= block_size * (1 - accept_ratio)):
            logging.info(
                "Block Resizing Finished !!! \n"
                "Current size %s is same as the expected %s", current_size,
                block_size)
            return True

    def create_md5_file(filename):
        """
        Create the file to verify md5 value.
        """
        logging.debug("create md5 file %s" % filename)
        if os_type == 'windows':
            vm.copy_files_to(params["tmp_md5_file"], filename)
        else:
            session.cmd(params["dd_cmd"] % filename)

    def get_md5_of_file(filename):
        """
        Get the md5 value of filename.
        """
        ex_args = (mpoint, filename) if os_type == 'windows' else filename
        return session.cmd(md5_cmd % ex_args).split()[0]

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    driver_name = params.get("driver_name")
    os_type = params["os_type"]
    fstype = params.get("fstype")
    labeltype = params.get("labeltype", "msdos")
    img_size = params.get("image_size_stg", "10G")
    mpoint = params.get("disk_letter", "C")
    disk = params.get("disk_index", 0)
    md5_cmd = params.get("md5_cmd", "md5sum %s")
    md5_file = params.get("md5_file", "md5.dat")
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    img = QemuImg(data_image_params, data_dir.get_data_dir(), data_image)
    block_virtual_size = json.loads(img.info(force_share=True,
                                             output="json"))["virtual-size"]

    session = vm.wait_for_login(timeout=timeout)

    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)

    if params.get("format_disk") == "yes":
        if os_type == 'linux':
            disk = sorted(utils_disk.get_linux_disks(session).keys())[0]
        else:
            disk = utils_disk.get_windows_disks_index(session, img_size)[0]
            utils_disk.update_windows_disk_attributes(session, disk)
        error_context.context("Formatting disk", logging.info)
        mpoint = utils_disk.configure_empty_disk(session,
                                                 disk,
                                                 img_size,
                                                 os_type,
                                                 fstype=fstype,
                                                 labeltype=labeltype)[0]
        partition = mpoint.replace('mnt', 'dev') if 'mnt' in mpoint else None

    for ratio in params.objects("disk_change_ratio"):
        block_size = int(int(block_virtual_size) * float(ratio))
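        # ratio < 1.0 shrinks the disk in this iteration; ratio > 1.0 extends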

        # Record md5
        if params.get('md5_test') == 'yes':
            junction = ":\\" if os_type == 'windows' else "/"
            md5_filename = mpoint + junction + md5_file
            create_md5_file(md5_filename)
            md5 = get_md5_of_file(md5_filename)
            logging.debug("Got md5 %s ratio:%s on %s" % (md5, ratio, disk))

        # We need to shrink the disk in the guest first, then in the monitor
        if float(ratio) < 1.0:
            error_context.context(
                "Shrink disk size to %s in guest" % block_size, logging.info)
            if os_type == 'windows':
                shr_size = utils_numeric.normalize_data_size(
                    str(
                        utils_disk.get_disk_size(session, os_type, disk) -
                        block_size), 'M').split(".")[0]
                drive.shrink_volume(session, mpoint, shr_size)
            else:
                utils_disk.resize_filesystem_linux(session, partition,
                                                   str(block_size))
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))

        error_context.context("Change disk size to %s in monitor" % block_size,
                              logging.info)
        if vm.check_capability(Flags.BLOCKDEV):
            args = (None, block_size, data_image_dev)
        else:
            args = (data_image_dev, block_size)
        vm.monitor.block_resize(*args)

        if params.get("guest_prepare_cmd", ""):
            session.cmd(params.get("guest_prepare_cmd"))
        # Update GPT due to size changed
        if os_type == "linux" and labeltype == "gpt":
            session.cmd("sgdisk -e /dev/%s" % disk)
        if params.get("need_reboot") == "yes":
            session = vm.reboot(session=session)
        if params.get("need_rescan") == "yes":
            drive.rescan_disks(session)

        # We need to extend the disk in the monitor first, then in the guest
        if float(ratio) > 1.0:
            error_context.context("Extend disk to %s in guest" % block_size,
                                  logging.info)
            if os_type == 'windows':
                drive.extend_volume(session, mpoint)
            else:
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))
                utils_disk.resize_filesystem_linux(session, partition,
                                                   utils_disk.SIZE_AVAILABLE)
        global current_size
        current_size = 0
        if not wait.wait_for(lambda: verify_disk_size(session, os_type, disk),
                             20, 0, 1, "Block Resizing"):
            test.fail("Block size get from guest is not same as expected.\n"
                      "Reported: %s\nExpect: %s\n" %
                      (current_size, block_size))
        session = vm.reboot(session=session)

        if os_type == 'linux':
            if not utils_disk.is_mount(
                    partition, dst=mpoint, fstype=fstype, session=session):
                utils_disk.mount(partition,
                                 mpoint,
                                 fstype=fstype,
                                 session=session)

        if params.get('iozone_test') == 'yes':
            iozone_timeout = float(params.get("iozone_timeout", 1800))
            iozone_cmd_options = params.get("iozone_option") % mpoint
            io_test = generate_instance(params, vm, 'iozone')
            try:
                io_test.run(iozone_cmd_options, iozone_timeout)
            finally:
                io_test.clean()

        # Verify md5
        if params.get('md5_test') == 'yes':
            new_md5 = get_md5_of_file(md5_filename)
            test.assertTrue(new_md5 == md5, "Unmatched md5: %s" % new_md5)

    session.close()
Example #25
0
def run(test, params, env):
    """
    Test multi disk support of guest; this case will:
    1) Create disks image in configuration file.
    2) Start the guest with those disks.
    3) Checks qtree vs. test params. (Optional)
    4) Create partition on those disks.
    5) Get disk dev filenames in guest.
    6) Format those disks in guest.
    7) Copy file into / out of those disks.
    8) Compare the original file and the copied file using md5 or fc command.
    9) Repeat steps 3-5 if needed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _add_param(name, value):
        """ Converts name+value to stg_params string """
        if value:
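            # escape spaces so multi-word values survive the later
            # stg_params.split(' ') pass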
            value = re.sub(' ', '\\ ', value)
            return " %s:%s " % (name, value)
        else:
            return ''

    def _do_post_cmd(session):
        cmd = params.get("post_cmd")
        if cmd:
            session.cmd_status_output(cmd)
        session.close()

    error_context.context("Parsing test configuration", logging.info)
    stg_image_num = 0
    stg_params = params.get("stg_params", "")
    # Compatibility
    stg_params += _add_param("image_size", params.get("stg_image_size"))
    stg_params += _add_param("image_format", params.get("stg_image_format"))
    stg_params += _add_param("image_boot", params.get("stg_image_boot", "no"))
    stg_params += _add_param("drive_format", params.get("stg_drive_format"))
    stg_params += _add_param("drive_cache", params.get("stg_drive_cache"))
    if params.get("stg_assign_index") != "no":
        # Assume 0 and 1 are already occupied (hd0 and cdrom)
        stg_params += _add_param("drive_index", 'range(2,n)')
    param_matrix = {}

    stg_params = stg_params.split(' ')
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    rerange = []
    has_name = False
    for i in range(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        if cmd == "image_name":
            has_name = True
        if _RE_RANGE1.match(parm):
            parm = _range(parm)
            if parm is False:
                test.error("Incorrect cfg: stg_params %s looks "
                           "like range(..) but doesn't contain "
                           "numbers." % cmd)
            param_matrix[cmd] = parm
            if type(parm) is str:
                # When we know the stg_image_num, substitute it.
                rerange.append(cmd)
                continue
        else:
            # ',' separated list of values
            parm = parm.split(',')
            j = 0
            while j < len(parm) - 1:
                if parm[j][-1] == '\\':
                    parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
                j += 1
            param_matrix[cmd] = parm
        stg_image_num = max(stg_image_num, len(parm))

    stg_image_num = int(params.get('stg_image_num', stg_image_num))
    for cmd in rerange:
        param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num)
    # param_table* are for pretty print of param_matrix
    param_table = []
    param_table_header = ['name']
    if not has_name:
        param_table_header.append('image_name')
    for _ in param_matrix:
        param_table_header.append(_)

    stg_image_name = params.get('stg_image_name', 'images/%s')
    for i in range(stg_image_num):
        name = "stg%d" % i
        params['images'] += " %s" % name
        param_table.append([])
        param_table[-1].append(name)
        if not has_name:
            params["image_name_%s" % name] = stg_image_name % name
            param_table[-1].append(params.get("image_name_%s" % name))
        for parm in param_matrix.items():
            params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])])
            param_table[-1].append(params.get('%s_%s' % (parm[0], name)))

    if params.get("multi_disk_params_only") == 'yes':
        # Only print the test param_matrix and finish
        logging.info('Newly added disks:\n%s',
                     astring.tabular_output(param_table, param_table_header))
        return

    # Always recreate VMs and disks
    error_context.context("Start the guest with new disks", logging.info)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        env_process.process_images(env_process.preprocess_image, test,
                                   vm_params)

    error_context.context("Start the guest with those disks", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.create(timeout=max(10, stg_image_num), params=params)
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    n_repeat = int(params.get("n_repeat", "1"))
    file_system = [_.strip() for _ in params["file_system"].split()]
    cmd_timeout = float(params.get("cmd_timeout", 360))
    black_list = params["black_list"].split()
    drive_letters = int(params.get("drive_letters", "26"))
    stg_image_size = params["stg_image_size"]
    dd_test = params.get("dd_test", "no")
    labeltype = params.get("labeltype", "gpt")

    have_qtree = True
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        have_qtree = False

    if (params.get("check_guest_proc_scsi") == "yes") and have_qtree:
        error_context.context("Verifying qtree vs. test params")
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info_block())
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(
            session.cmd_output('cat /proc/scsi/scsi'))
        err += tmp1 + tmp2

        if err:
            test.fail("%s errors occurred while verifying qtree vs."
                      " params" % err)
        if params.get('multi_disk_only_qtree') == 'yes':
            return
    try:
        err_msg = "Set disks num: %d" % stg_image_num
        err_msg += ", Get disks num in guest: %d"
        ostype = params["os_type"]
        if ostype == "windows":
            error_context.context("Get windows disk index that to "
                                  "be formatted", logging.info)
            disks = utils_disk.get_windows_disks_index(session, stg_image_size)
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
            if len(disks) > drive_letters:
                black_list.extend(utils_misc.get_winutils_vol(session))
                disks = random.sample(disks, drive_letters - len(black_list))
            error_context.context("Clear readonly for all disks and online "
                                  "them in windows guest.", logging.info)
            if not utils_disk.update_windows_disk_attributes(session, disks):
                test.fail("Failed to update windows disk attributes.")
            dd_test = "no"
        else:
            error_context.context("Get linux disk that to be "
                                  "formatted", logging.info)
            disks = sorted(utils_disk.get_linux_disks(session).keys())
            if len(disks) < stg_image_num:
                test.fail("Fail to list all the volumes"
                          ", %s" % err_msg % len(disks))
    except Exception:
        _do_post_cmd(session)
        raise
    try:
        for i in range(n_repeat):
            logging.info("iterations: %s", (i + 1))
            for disk in disks:
                error_context.context("Format disk in guest: '%s'" % disk,
                                      logging.info)
                # Randomly select one file system from file_system
                fstype = random.choice(file_system).strip()
                partitions = utils_disk.configure_empty_disk(
                    session, disk, stg_image_size, ostype,
                    fstype=fstype, labeltype=labeltype)
                if not partitions:
                    test.fail("Fail to format disks.")
                cmd_list = params["cmd_list"]
                for partition in partitions:
                    if "/" not in partition:
                        partition += ":"
                    else:
                        partition = partition.split("/")[-1]
                    error_context.context("Copy file into / out of partition:"
                                          " %s..." % partition, logging.info)
                    for cmd_l in cmd_list.split():
                        cmd = params.get(cmd_l)
                        if cmd:
                            session.cmd(cmd % partition, timeout=cmd_timeout)
                    cmd = params["compare_command"]
                    key_word = params["check_result_key_word"]
                    output = session.cmd_output(cmd)
                    if key_word not in output:
                        test.fail("Files on guest os root fs and disk differ")
                    if dd_test != "no":
                        error_context.context("dd test on partition: %s..."
                                              % partition, logging.info)
                        status, output = session.cmd_status_output(
                            dd_test % (partition, partition), timeout=cmd_timeout)
                        if status != 0:
                            test.fail("dd test fail: %s" % output)
            need_reboot = params.get("need_reboot", "no")
            need_shutdown = params.get("need_shutdown", "no")
            if need_reboot == "yes":
                error_context.context("Rebooting guest ...", logging.info)
                session = vm.reboot(session=session, timeout=login_timeout)
            if need_shutdown == "yes":
                error_context.context("Shutting down guest ...", logging.info)
                vm.graceful_shutdown(timeout=login_timeout)
                if vm.is_alive():
                    test.fail("Fail to shut down guest.")
                error_context.context("Start the guest again.", logging.info)
                vm = env.get_vm(params["main_vm"])
                vm.create(params=params)
                session = vm.wait_for_login(timeout=login_timeout)
            error_context.context("Delete partitions in guest.", logging.info)
            for disk in disks:
                utils_disk.clean_partition(session, disk, ostype)
    finally:
        _do_post_cmd(session)
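
A minimal, self-contained sketch of the stg_params matrix expansion performed above, assuming simplified inputs (no range(..) values or escaped separators, which the real test also handles); expand_stg_params is a hypothetical helper name:

def expand_stg_params(stg_params, stg_image_num):
    """Turn 'key:v1,v2 ...' tokens into per-image param dicts."""
    matrix = {}
    for token in stg_params.split():
        key, values = token.split(':', 1)
        matrix[key] = values.split(',')
    images = {}
    for i in range(stg_image_num):
        name = 'stg%d' % i
        # Values cycle when there are fewer values than images.
        images[name] = {key: vals[i % len(vals)]
                        for key, vals in matrix.items()}
    return images

# Example: two images alternating formats, sharing one size.
print(expand_stg_params('image_size:1G image_format:qcow2,raw', 2))
# -> {'stg0': {'image_size': '1G', 'image_format': 'qcow2'},
#     'stg1': {'image_size': '1G', 'image_format': 'raw'}}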
Example #26
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 to 5 as required
    7. check the full/incremental backup file data
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("loal_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret
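        # (encode=True has virsh base64-encode the plain passphrase)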
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {
                                "type": "passphrase",
                                "uuid": luks_secret_uuid
                            }
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Create some data in vdb; each round writes at a different
            # offset so every incremental backup captures fresh dirty blocks
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_export": nbd_export_name
            }
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(
                        nbd_params, backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if scratch file encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and error_operation is not None:
            if "kill_qemu" in error_operation:
                clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)

        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()

        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
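
For reference, a hedged sketch of what pulling the full backup over the unix-socket NBD export above amounts to; pull_full_backup is an illustrative name, and this approximates what utils_backup.pull_full_backup_to_file does rather than its exact implementation:

import subprocess

def pull_full_backup(nbd_socket, export_name, backup_file):
    # The export exposes the disk contents frozen at backup time;
    # convert it into a standalone qcow2 image.
    nbd_uri = "nbd+unix:///%s?socket=%s" % (export_name, nbd_socket)
    subprocess.run(["qemu-img", "convert", "-f", "raw", nbd_uri,
                    "-O", "qcow2", backup_file], check=True)

pull_full_backup("/tmp/pull_backup.socket", "vdb", "/tmp/backup_file_0.qcow2")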
Example #27
def run(test, params, env):
    """
    Test the block-threshold event during a pull-mode backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. monitor block-threshold event on scratch file/dev
    5. create some data on vdb's same position as step 2 to trigger event
    6. check that the block-threshold event is captured
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    event_type = params.get("event_type")
    usage_threshold = params.get("usage_threshold", "100")
    tmp_dir = data_dir.get_tmp_dir()
    local_hostname = params.get("loal_hostname", "localhost")
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")
    # Open a new virsh session for event monitor
    virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC, auto_close=True)
    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(7, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "event monitor for incremental backup.")

    def get_backup_disk_index(vm_name, disk_name):
        """
        Get the index of the backup disk to be monitored by the virsh event

        :param vm_name: vm name
        :param disk_name: virtual disk name, such as 'vdb'
        :return: the index of the virtual disk in backup xml
        """
        backup_xml = virsh.backup_dumpxml(vm_name).stdout.strip()
        logging.debug("%s's current backup xml is: %s" % (vm_name, backup_xml))
        backup_xml_dom = xml_utils.XMLTreeFile(backup_xml)
        index_xpath = "/disks/disk"
        for disk_element in backup_xml_dom.findall(index_xpath):
            if disk_element.get("name") == disk_name:
                return disk_element.get("index")

    def is_event_captured(virsh_session, re_pattern):
        """
        Check if event captured

        :param virsh_session: the virsh session of the event monitor
        :param re_pattern: the re pattern used to represent the event
        :return: True means event captured, False means not
        """
        ret_output = virsh_session.get_stripped_output()
        if not re.search(re_pattern, ret_output, re.IGNORECASE):
            return False
        logging.debug("event monitor output: %s", ret_output)
        return True

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(original_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, original_disk_size,
                                  "qcow2")
        disk_params = {
            "device_type": "disk",
            "type_name": "file",
            "driver_type": "qcow2",
            "target_dev": original_disk_target,
            "source_file": disk_path
        }
        disk_params["target_dev"] = original_disk_target
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        backup_file_list = []

        # Prepare backup xml
        backup_params = {"backup_mode": "pull"}
        # Set libvirt default nbd export name and bitmap name
        nbd_export_name = original_disk_target
        nbd_bitmap_name = "backup-" + original_disk_target

        backup_server_dict = {}
        if nbd_protocol == "unix":
            backup_server_dict["transport"] = "unix"
            backup_server_dict["socket"] = nbd_socket
        else:
            backup_server_dict["name"] = nbd_hostname
            backup_server_dict["port"] = nbd_tcp_port
        backup_params["backup_server"] = backup_server_dict
        backup_disk_xmls = []
        for vm_disk in vm_disks:
            backup_disk_params = {"disk_name": vm_disk}
            if vm_disk != original_disk_target:
                backup_disk_params["enable_backup"] = "no"
            else:
                backup_disk_params["enable_backup"] = "yes"
                backup_disk_params["disk_type"] = scratch_type
                # Prepare nbd scratch file/dev params
                scratch_params = {"attrs": {}}
                scratch_path = None
                if scratch_type == "file":
                    scratch_file_name = "scratch_file"
                    scratch_path = os.path.join(tmp_dir, scratch_file_name)
                    if reuse_scratch_file:
                        libvirt.create_local_disk("file", scratch_path,
                                                  original_disk_size, "qcow2")
                    scratch_params["attrs"]["file"] = scratch_path
                elif scratch_type == "block":
                    scratch_path = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, image_size=scratch_blkdev_size)
                    scratch_params["attrs"]["dev"] = scratch_path
                else:
                    test.fail("We do not support backup scratch type: '%s'" %
                              scratch_type)
                if scratch_luks_encrypted:
                    encryption_dict = {
                        "encryption": "luks",
                        "secret": {
                            "type": "passphrase",
                            "uuid": luks_secret_uuid
                        }
                    }
                    scratch_params["encryption"] = encryption_dict
                logging.debug("scratch params: %s", scratch_params)
                backup_disk_params["backup_scratch"] = scratch_params

            backup_disk_xml = utils_backup.create_backup_disk_xml(
                backup_disk_params)
            backup_disk_xmls.append(backup_disk_xml)
        logging.debug("disk list %s", backup_disk_xmls)
        backup_xml = utils_backup.create_backup_xml(backup_params,
                                                    backup_disk_xmls)
        logging.debug("Backup Xml: %s", backup_xml)

        # Prepare checkpoint xml
        checkpoint_name = "checkpoint"
        checkpoint_list.append(checkpoint_name)
        cp_params = {"checkpoint_name": checkpoint_name}
        cp_params["checkpoint_desc"] = params.get("checkpoint_desc",
                                                  "desc of cp")
        disk_param_list = []
        for vm_disk in vm_disks:
            cp_disk_param = {"name": vm_disk}
            if vm_disk != original_disk_target:
                cp_disk_param["checkpoint"] = "no"
            else:
                cp_disk_param["checkpoint"] = "bitmap"
                cp_disk_bitmap = params.get("cp_disk_bitmap")
                if cp_disk_bitmap:
                    cp_disk_param["bitmap"] = cp_disk_bitmap
            disk_param_list.append(cp_disk_param)
        checkpoint_xml = utils_backup.create_checkpoint_xml(
            cp_params, disk_param_list)
        logging.debug("Checkpoint Xml: %s", checkpoint_xml)

        # Generate some random data in vm's test disk
        def dd_data_to_testdisk():
            """
            Generate some data to vm's test disk
            """
            dd_count = "1"
            dd_seek = "10"
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

        dd_data_to_testdisk()

        # Start backup
        backup_options = backup_xml.xml + " " + checkpoint_xml.xml
        if reuse_scratch_file:
            backup_options += " --reuse-external"
        backup_result = virsh.backup_begin(vm_name, backup_options, debug=True)

        # Start to monitor block-threshold of backup disk's scratch file/dev
        backup_disk_index = get_backup_disk_index(vm_name,
                                                  original_disk_target)
        if not backup_disk_index:
            test.fail("Backup xml has no index for disks.")
        backup_disk_obj = original_disk_target + "[%s]" % backup_disk_index
        virsh.domblkthreshold(vm_name, backup_disk_obj, usage_threshold)
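        # Note: block-threshold is a one-shot event; once libvirt emits it,
        # the threshold must be registered again for further notifications.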
        event_cmd = "event %s %s --loop" % (vm_name, event_type)
        virsh_session.sendline(event_cmd)

        # Generate some random data to same position of vm's test disk
        dd_data_to_testdisk()

        # Check if the block-threshold event captured by monitor
        if event_type == "block-threshold":
            event_pattern = (".*block-threshold.*%s.*%s\[%s\].* %s .*" %
                             (vm_name, original_disk_target, backup_disk_index,
                              usage_threshold))
        if not utils_misc.wait_for(
                lambda: is_event_captured(virsh_session, event_pattern), 10):
            test.fail("Event not captured by event monitor")

        # Abort backup job
        virsh.domjobabort(vm_name, debug=True)

    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)

        # Remove iscsi devices
        if scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove scratch file
        if "scratch_path" in locals():
            if scratch_type == "file" and os.path.exists(scratch_path):
                os.remove(scratch_path)
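
A hedged sketch of the same threshold/event interaction driven through the virsh CLI directly; the vm name, disk index and threshold value are illustrative only:

import subprocess

vm_name = "avocado-vt-vm1"   # assumption: the test vm's name
disk_obj = "vdb[1]"          # target[index] taken from the backup xml
threshold = "100"            # bytes of allocation before the event fires

# Register the threshold on the scratch image backing the backup disk;
# a later write past it makes 'virsh event <vm> block-threshold --loop'
# print a line matching the pattern checked in the test above.
subprocess.run(["virsh", "domblkthreshold", vm_name, disk_obj, threshold],
               check=True)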
Example #29
def run(test, params, env):
    """
    KVM block resize test:

    1) Start guest with both data disk and system disk.
    2) Extend/shrink data disk in guest.
    3) Verify the data disk size match expected size.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_disk_size(session, os_type, disk):
        """
        Verify that the current block size matches the expected size.
        """
        current_size = utils_disk.get_disk_size(session, os_type, disk)
        accept_ratio = float(params.get("accept_ratio", 0))
        if block_size * (1 - accept_ratio) <= current_size <= block_size:
            logging.info(
                "Block resizing finished: current size %s matches "
                "the expected %s", current_size, block_size)
            return True
        logging.error("Current: %s\nExpect: %s",
                      current_size, block_size)
        return False

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    os_type = params["os_type"]
    img_size = params.get("image_size_stg", "10G")
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    img = qemu_storage.QemuImg(data_image_params, data_dir.get_data_dir(),
                               data_image)
    data_image_dev = ""
    if vm.check_capability(Flags.BLOCKDEV):
        filters = {"driver": data_image_params.get("image_format", "qcow2")}
    else:
        filters = {"file": img.image_filename}

    # get format node-name(-blockdev) or device name(-drive)
    for dev in vm.devices.get_by_params(filters):
        if dev.aobject == data_image:
            data_image_dev = dev.get_qid()

    if not data_image_dev:
        test.error("Cannot find device to resize.")

    block_virtual_size = json.loads(img.info(force_share=True,
                                             output="json"))["virtual-size"]

    session = vm.wait_for_login(timeout=timeout)
    disk = sorted(utils_disk.get_linux_disks(session).keys())[0]

    for ratio in params.objects("disk_change_ratio"):
        block_size = int(int(block_virtual_size) * float(ratio))
        error_context.context("Change disk size to %s in monitor" % block_size,
                              logging.info)

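        # With -blockdev, QMP block_resize addresses the format node-name;
        # with legacy -drive it addresses the device id instead.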
        if vm.check_capability(Flags.BLOCKDEV):
            args = (None, block_size, data_image_dev)
        else:
            args = (data_image_dev, block_size)
        vm.monitor.block_resize(*args)

        # refresh the guest so it picks up the new size
        if params.get("guest_prepare_cmd", ""):
            session.cmd(params.get("guest_prepare_cmd"))
        if params.get("need_reboot") == "yes":
            session = vm.reboot(session=session)

        if not wait.wait_for(lambda: verify_disk_size(session, os_type, disk),
                             20, 0, 1, "Block Resizing"):
            test.fail("The current block size is not the same as expected.\n")

    session.close()
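
A minimal sketch of the size-acceptance rule implemented by verify_disk_size above; size_matches is an illustrative name and the accept_ratio default here is for demonstration (the test reads it from params, defaulting to 0):

def size_matches(current_size, expected_size, accept_ratio=0.005):
    # The guest-visible size may lag slightly behind the resize, so any
    # value within accept_ratio below the target counts as a match.
    return expected_size * (1 - accept_ratio) <= current_size <= expected_size

GiB = 1024 ** 3
print(size_matches(int(4.99 * GiB), 5 * GiB))  # True with 0.5% tolerance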