Code Example #1
File: storage.py Project: yafu-1/avocado-vt
def copy_nfs_image(params, image_name, root_dir):
    """
    Copy the image from image_path to the NFS mount dir if the image is not
    available or is corrupted.

    :param params: Test dict params
    :param image_name: Master image name.
    :param root_dir: Base directory for relative filenames.
    :raise: TestSetupFail if image is unavailable/corrupted
    """
    image_format = params.get("image_format", "qcow2")
    if params.get("setup_local_nfs", "no") == "yes":
        # check for image availability in NFS shared path
        base_dir = params.get("images_base_dir", data_dir.get_data_dir())
        dst = get_image_filename(params, base_dir)
        if (not os.path.isfile(dst) or
                utils_misc.get_image_info(dst)['lcounts'].lower() == "true"):
            source = os.path.join(root_dir, "images", image_name)
            if image_format not in source:
                source = "%s.%s" % (source, image_format)
            logging.debug(
                "Checking for image available in image data "
                "path - %s", source)
            # check for image availability in images data directory
            if (os.path.isfile(source) and
                    not utils_misc.get_image_info(source)['lcounts'].lower()
                    == "true"):
                logging.debug("Copying guest image from %s to %s", source, dst)
                shutil.copy(source, dst)
            else:
                raise exceptions.TestSetupFail("Guest image is unavailable"
                                               "/corrupted in %s and %s" %
                                               (source, dst))
Code Example #2
def copy_nfs_image(params, image_name, root_dir):
    """
    Copy the image from image_path to the NFS mount dir if the image is not
    available or is corrupted.

    :param params: Test dict params
    :param image_name: Master image name.
    :param root_dir: Base directory for relative filenames.
    :raise: TestSetupFail if image is unavailable/corrupted
    """
    image_format = params.get("image_format", "qcow2")
    if params.get("setup_local_nfs", "no") == "yes":
        # check for image availability in NFS shared path
        base_dir = params.get("images_base_dir", data_dir.get_data_dir())
        dst = get_image_filename(params, base_dir)
        if (not os.path.isfile(dst) or
                utils_misc.get_image_info(dst)['lcounts'].lower() == "true"):
            source = os.path.join(root_dir, "images", image_name)
            if image_format not in source:
                source = "%s.%s" % (source, image_format)
            logging.debug("Checking for image available in image data "
                          "path - %s", source)
            # check for image availability in images data directory
            if (os.path.isfile(source) and not
                    utils_misc.get_image_info(source)['lcounts'].lower()
                    == "true"):
                logging.debug("Copying guest image from %s to %s", source,
                              dst)
                shutil.copy(source, dst)
            else:
                raise exceptions.TestSetupFail("Guest image is unavailable"
                                               "/corrupted in %s and %s" %
                                               (source, dst))
Code Example #3
def copy_nfs_image(params, root_dir, basename=False):
    """
    Copy the image from image_path to the NFS mount dir if the image is not
    available or is corrupted.

    :param params: Test dict params
    :param root_dir: Base directory for relative filenames.
    :param basename: True to use only basename of image name
    :raise: TestSetupFail if image is unavailable/corrupted
    """
    if params.get("setup_local_nfs", "no") == "yes":
        # check for image availability in NFS shared path
        base_dir = params["nfs_mount_dir"]
        dst = get_image_filename(params, base_dir, basename=basename)
        if (not os.path.isfile(dst) or
                utils_misc.get_image_info(dst)['lcounts'].lower() == "true"):
            source = get_image_filename(params, root_dir)
            logging.debug(
                "Checking for image available in image data "
                "path - %s", source)
            # check for image availability in images data directory
            if (os.path.isfile(source) and
                    not utils_misc.get_image_info(source)['lcounts'].lower()
                    == "true"):
                logging.debug("Copying guest image from %s to %s", source, dst)
                shutil.copy(source, dst)
            else:
                raise exceptions.TestSetupFail("Guest image is unavailable"
                                               "/corrupted in %s and %s" %
                                               (source, dst))
Code Example #4
File: v2v_options.py Project: noxdafox/tp-libvirt
 def check_image(output, check_point, expected_value):
     """
     Verify converted image file allocation mode and format
     """
     img_path = get_img_path(output)
     if not img_path or not os.path.isfile(img_path):
         logging.error("Fail to get image path: %s", img_path)
         return
     img_info = utils_misc.get_image_info(img_path)
     logging.info("Image info after converted: %s", img_info)
     if check_point == "allocation":
         if expected_value == "sparse":
             if img_info['vsize'] > img_info['dsize']:
                 logging.info("Image file is sparse")
             else:
                 raise error.TestFail("Image allocation check fail")
         elif expected_value == "preallocated":
             if img_info['vsize'] <= img_info['dsize']:
                 logging.info("Image file is preallocated")
             else:
                 raise error.TestFail("Image allocation check fail")
     if check_point == "format":
         if expected_value == img_info['format']:
             logging.info("Image file format is %s", expected_value)
         else:
             raise error.TestFail("Image format check fail")
Code Example #5
def check_vol_info(pool_vol, vol_name, expect_info=None):
    """
    Check the volume info and/or compare it with expect_info.

    :param pool_vol: Instance of PoolVolume.
    :param vol_name: Name of the volume.
    :param expect_info: Expected volume info for comparison.
    """
    vol_info = pool_vol.volume_info(vol_name)
    for key in vol_info:
        logging.debug("Volume info: %s = %s", key, vol_info[key])
    if not expect_info:
        return True
    else:
        check_capacity_pass = True
        check_allocation_pass = True
        try:
            # Get image info
            vol_path = pool_vol.list_volumes()[vol_name]
            img_info = utils_misc.get_image_info(vol_path)
            if expect_info['Capacity'] != img_info['vsize']:
                logging.debug("Capacity(Virtual size) is %s bytes",
                              img_info['vsize'])
                logging.error("Volume capacity not equal to expect value %s",
                              expect_info['Capacity'])
                check_capacity_pass = False
            if expect_info['Allocation'] != img_info['dsize']:
                logging.debug("Allocation(Disk size) is %s bytes",
                              img_info['dsize'])
                logging.error("Volume Allocation not equal to expect value %s",
                              expect_info['Allocation'])
                check_allocation_pass = False
            return check_capacity_pass & check_allocation_pass
        except KeyError as detail:
            raise error.TestError("Fail to check volume info:\n%s" % detail)
Code Example #6
File: v2v_options.py Project: liuzzfnst/tp-libvirt
 def check_image(output, check_point, expected_value):
     """
     Verify converted image file allocation mode and format
     """
     img_path = get_img_path(output)
     if not img_path or not os.path.isfile(img_path):
         logging.error("Fail to get image path: %s", img_path)
         return
     img_info = utils_misc.get_image_info(img_path)
     logging.info("Image info after converted: %s", img_info)
     if check_point == "allocation":
         if expected_value == "sparse":
             if img_info['vsize'] > img_info['dsize']:
                 logging.info("Image file is sparse")
             else:
                 raise error.TestFail("Image allocation check fail")
         elif expected_value == "preallocated":
             if img_info['vsize'] <= img_info['dsize']:
                 logging.info("Image file is preallocated")
             else:
                 raise error.TestFail("Image allocation check fail")
     if check_point == "format":
         if expected_value == img_info['format']:
             logging.info("Image file format is %s", expected_value)
         else:
             raise error.TestFail("Image format check fail")
Code Example #7
    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name,
                                                   brick_path, pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                        (image_info["format"], disk_format,
                         image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt"
                    % (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip
Code Example #8
File: virsh_boot.py Project: uyvan/tp-libvirt
def prepare_gluster_disk(blk_source, test, **kwargs):
    """
    Set up gluster disk device and replace the domain disk image

    :param blk_source: The domain disk image path
    :param test: Avocado test object
    :param kwargs: Key words for gluster device setup
    :return: host_ip
    """
    vol_name = kwargs.get("vol_name")
    brick_path = kwargs.get("brick_path")
    disk_img = kwargs.get("disk_img")
    disk_format = kwargs.get("disk_format")
    host_ip = gluster.setup_or_cleanup_gluster(True, **kwargs)
    logging.debug("host ip: %s ", host_ip)
    # Copy the domain disk image to gluster disk path
    image_info = utils_misc.get_image_info(blk_source)
    dest_image = "/mnt/%s" % disk_img
    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (blk_source, dest_image))
    else:
        disk_cmd = (
            "qemu-img convert -f %s -O %s %s %s" %
            (image_info["format"], disk_format, blk_source, dest_image))
    # Mount the gluster disk and create the image
    src_mnt = "%s:%s" % (host_ip, vol_name)
    if not utils_misc.mount(src_mnt, "/mnt", "glusterfs"):
        test.error("glusterfs mount failed")
    process.run("%s && chmod a+rw /mnt/%s && umount /mnt" %
                (disk_cmd, disk_img),
                shell=True)
    return host_ip
Code Example #9
 def check_image(img_path, check_point, expected_value):
     """
     Verify image file allocation mode and format
     """
     if not img_path or not os.path.isfile(img_path):
         raise exceptions.TestError("Image path: '%s' is invalid" % img_path)
     img_info = utils_misc.get_image_info(img_path)
     logging.debug("Image info: %s", img_info)
     if check_point == "allocation":
         if expected_value == "sparse":
             if img_info['vsize'] > img_info['dsize']:
                 logging.info("%s is a sparse image", img_path)
             else:
                 raise exceptions.TestFail("%s is not a sparse image" % img_path)
         elif expected_value == "preallocated":
             if img_info['vsize'] <= img_info['dsize']:
                 logging.info("%s is a preallocated image", img_path)
             else:
                 raise exceptions.TestFail("%s is not a preallocated image"
                                           % img_path)
     if check_point == "format":
         if expected_value == img_info['format']:
             logging.info("%s format is %s", img_path, expected_value)
         else:
             raise exceptions.TestFail("%s format is not %s"
                                       % (img_path, expected_value))
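The allocation checks in the examples above boil down to comparing the virtual size ('vsize') against the size on disk ('dsize'): a sparse image occupies less disk space than its virtual size, while a preallocated one does not. The same check can be done without avocado-vt by calling qemu-img directly; the sketch below is an illustration only and assumes just the documented 'qemu-img info --output=json' fields 'virtual-size' and 'actual-size'.

import json
import subprocess


def check_allocation(img_path, expected_value):
    """Return True if img_path matches the expected allocation mode."""
    out = subprocess.check_output(
        ["qemu-img", "info", "--output=json", img_path])
    info = json.loads(out)
    is_sparse = info["virtual-size"] > info["actual-size"]
    if expected_value == "sparse":
        return is_sparse
    if expected_value == "preallocated":
        return not is_sparse
    raise ValueError("unknown allocation mode: %s" % expected_value)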
Code Example #10
def check_vol_info(pool_vol, vol_name, expect_info=None):
    """
    Check the volume info and/or compare it with expect_info.

    :param pool_vol: Instance of PoolVolume.
    :param vol_name: Name of the volume.
    :param expect_info: Expected volume info for comparison.
    """
    vol_info = pool_vol.volume_info(vol_name)
    for key in vol_info:
        logging.debug("Volume info: %s = %s", key, vol_info[key])
    if not expect_info:
        return True
    else:
        check_capacity_pass = True
        check_allocation_pass = True
        try:
            # Get image info
            vol_path = pool_vol.list_volumes()[vol_name]
            img_info = utils_misc.get_image_info(vol_path)
            if expect_info['Capacity'] != img_info['vsize']:
                logging.debug("Capacity(Virtual size) is %s bytes",
                              img_info['vsize'])
                logging.error("Volume capacity not equal to expect value %s",
                              expect_info['Capacity'])
                check_capacity_pass = False
            if expect_info['Allocation'] != img_info['dsize']:
                logging.debug("Allocation(Disk size) is %s bytes",
                              img_info['dsize'])
                logging.error("Volume Allocation not equal to expect value %s",
                              expect_info['Allocation'])
                check_allocation_pass = False
            return check_capacity_pass & check_allocation_pass
        except KeyError as detail:
            raise error.TestError("Fail to check volume info:\n%s" % detail)
Code Example #11
File: libvirt_disk.py Project: peixiu/avocado-vt
def create_remote_disk_by_same_metadata(vm, params):
    """
    Create an empty file image on remote host using same name/vsize/path/format
    as the first disk of the vm on local host

    :param vm:  the VM object
    :param params:  dict, parameters used
    :return:  str, the path of newly created image
    """
    disk_format = params.get("disk_format", "qcow2")
    server_ip = params.get('server_ip', params.get('migrate_dest_host'))
    server_user = params.get('server_user', params.get('remote_user'))
    server_pwd = params.get('server_pwd', params.get('migrate_dest_pwd'))

    blk_source = get_first_disk_source(vm)
    vsize = utils_misc.get_image_info(blk_source).get("vsize")
    remote_session = remote_old.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd, r'[$#%]')
    utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
    create_disk('file',
                path=blk_source,
                size=vsize,
                disk_format=disk_format,
                extra='',
                session=remote_session)

    remote_session.close()
    return blk_source
Code Example #12
    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path and name from parameters
        data_path = data_dir.get_data_dir()
        image_name = params.get("image_name")
        image_format = params.get("image_format")
        image_source = os.path.join(data_path,
                                    image_name + '.' + image_format)

        # Setup gluster.
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name,
                                                   brick_path, pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s /mnt/%s" % (image_source, disk_img))
        else:
            # Convert the disk format
            disk_cmd = ("qemu-img convert -f %s -O %s %s /mnt/%s" %
                        (image_info["format"], disk_format, image_source, disk_img))

        # Mount the gluster disk and create the image.
        utils.run("mount -t glusterfs %s:%s /mnt;"
                  " %s; chmod a+rw /mnt/%s; umount /mnt"
                  % (host_ip, vol_name, disk_cmd, disk_img))

        return host_ip
Code Example #13
File: virsh_boot.py Project: balamuruhans/tp-libvirt
def prepare_gluster_disk(blk_source, test, **kwargs):
    """
    Set up gluster disk device and replace the domain disk image

    :param blk_source: The domain disk image path
    :param test: Avocado test object
    :param kwargs: Key words for gluster device setup
    :return: host_ip
    """
    vol_name = kwargs.get("vol_name")
    brick_path = kwargs.get("brick_path")
    disk_img = kwargs.get("disk_img")
    disk_format = kwargs.get("disk_format")
    host_ip = utlv.setup_or_cleanup_gluster(True, vol_name, brick_path)
    logging.debug("host ip: %s ", host_ip)
    # Copy the domain disk image to gluster disk path
    image_info = utils_misc.get_image_info(blk_source)
    dest_image = "/mnt/%s" % disk_img
    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (blk_source, dest_image))
    else:
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                    (image_info["format"], disk_format,
                     blk_source, dest_image))
    # Mount the gluster disk and create the image
    src_mnt = "%s:%s" % (host_ip, vol_name)
    if not utils_misc.mount(src_mnt, "/mnt", "glusterfs"):
        test.error("glusterfs mount failed")
    process.run("%s && chmod a+rw /mnt/%s && umount /mnt" %
                (disk_cmd, disk_img), shell=True)
    return host_ip
Code Example #14
    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name, brick_path,
                                                   pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = (
                "qemu-img convert -f %s -O %s %s %s" %
                (image_info["format"], disk_format, image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt" %
                    (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip
Code Example #15
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: updated images.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = eval(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create disk on remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file",
                                     disk_format=disk_format,
                                     path=blk_source,
                                     size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create disk on local
                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
                new_disk_dict = {"driver_type": disk_format}
                result = libvirt.attach_additional_device(
                    vm_name, target_dev, disk_path, new_disk_dict, False)
                libvirt.check_exit_status(result)

                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)

                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list
Code Example #16
File: virt_sysprep.py Project: FengYang/tp-libvirt
 def modify_source(vm_name, target, dst_image):
     """
     Modify domain's configuration to change its disk source
     """
     try:
         virsh.detach_disk(vm_name, target, extra="--config",
                           ignore_status=False)
         dst_image_format = utils_misc.get_image_info(dst_image)['format']
         options = "--config --subdriver %s" % dst_image_format
         virsh.attach_disk(vm_name, dst_image, target, extra=options,
                           ignore_status=False)
     except (remote.LoginError, virt_vm.VMError,
             aexpect.ShellError) as detail:
         raise error.TestFail("Modify guest source failed: %s" % detail)
Code Example #17
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             self.test.cancel("Cannot get iscsi device on this"
                              " host:%s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
Code Example #18
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             self.test.cancel("Cannot get iscsi device on this"
                              " host:%s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
Code Example #19
 def modify_source(vm_name, target, dst_image):
     """
     Modify domain's configuration to change its disk source
     """
     try:
         virsh.detach_disk(vm_name,
                           target,
                           extra="--config",
                           ignore_status=False)
         dst_image_format = utils_misc.get_image_info(dst_image)['format']
         options = "--config --subdriver %s" % dst_image_format
         virsh.attach_disk(vm_name,
                           dst_image,
                           target,
                           extra=options,
                           ignore_status=False)
      except (remote.LoginError, virt_vm.VMError,
              aexpect.ShellError) as detail:
          raise error.TestFail("Modify guest source failed: %s" % detail)
Code Example #20
 def __init__(self, test, params):
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.time_out = int(params.get("time_out", "600"))
     self.vm_name = params.get("main_vm")
     self.time_out = int(params.get("time_out", "600"))
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
     disk_node = xml_file.get_disk_all()['vda']
     source_file = disk_node.find('source').get('file')
     image_type = utils_misc.get_image_info(source_file)['format']
     if image_type != "qcow2":
         raise error.TestNAError("Disk image format is not qcow2, "
                                 "ignore snapshot test!")
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.current_snp_list = []
     self.snp_list = virsh.snapshot_list(self.vm_name)
     env = params.get("env")
     vm = env.get_vm(self.vm_name)
     # This can add snapshot create time
     vm.wait_for_login()
Code Example #21
    def check_image_info(self, image_path, check_item, expected_value):
        """
        Check value is expected in image info

        :param image_path: image path
        :param check_item: The item you want to check.
        :param expected_value: expected item value
        """
        image_info = utils_misc.get_image_info(image_path)

        if image_info.get(check_item) is None:
            self.test.fail("The {} value:{} you checked is"
                           " not returned in image_info:{}".
                           format(check_item, expected_value, image_info))
        else:
            actual_value = image_info[check_item]
            # Get actual value
            if check_item == 'vsize':
                expected_value = self._get_image_size_with_bytes(expected_value)
            # check item value
            if actual_value != expected_value:
                self.test.fail('The value :{} is not expected value:'
                               '{}'.format(actual_value, expected_value))
Code Example #22
    def _create_disk_image_on_dest(self):
        """
        Create disk image on dest host before migration
        Used for live vm migration with disk copy

        Note:
        This method doesn't handle backing chain setup, so you need to set up
        the disk image backing chain yourself if --copy-storage-inc is used.

        """
        logging.debug("Create disk image on dest host before migration")
        all_vm_disks = self.main_vm.get_blk_devices()
        for disk in list(itervalues(all_vm_disks)):
            disk_type = disk.get("type")
            disk_path = disk.get("source")
            image_info = utils_misc.get_image_info(disk_path)
            disk_size = image_info.get("vsize")
            disk_format = image_info.get("format")
            utils_misc.make_dirs(os.path.dirname(disk_path),
                                 self.remote_session)
            libvirt_disk.create_disk(disk_type, path=disk_path,
                                     size=disk_size, disk_format=disk_format,
                                     session=self.remote_session)
Code Example #23
 def __init__(self, test, params):
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.time_out = int(params.get("time_out", "600"))
     self.vm_name = params.get("main_vm")
     self.time_out = int(params.get("time_out", "600"))
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
     disk_node = xml_file.get_disk_all()['vda']
     source_file = disk_node.find('source').get('file')
     image_type = utils_misc.get_image_info(source_file)['format']
     if image_type != "qcow2":
         raise error.TestNAError("Disk image format is not qcow2, "
                                 "ignore snapshot test!")
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.current_snp_list = []
     self.snp_list = virsh.snapshot_list(self.vm_name)
     env = params.get("env")
     vm = env.get_vm(self.vm_name)
     # This can add snapshot create time
     vm.wait_for_login()
Code Example #24
def get_expect_info(new_capacity, vol_path, resize_option=None):
    """
    Get the expected volume capacity and allocation size, for comparison
    after a volume resize. Because virsh vol-info returns imprecise values,
    the volume info is taken from the qemu side instead. The process is:
    1) Transform the new capacity size to bytes.
    2) Get the image info via qemu-img info (byte sizes).
    3) Calculate the expected info according to the volume resize option.

    :param new_capacity: New capacity for the vol, as a scaled integer
    :param vol_path: Absolute path of the volume
    :return: Expected volume capacity and allocation
    """
    if new_capacity.isdigit():
        # Default bytes
        new_capacity = new_capacity + "b"

    suffixes_list1 = ['B', 'K', 'KIB', 'M', 'MIB', 'G', 'GIB', 'T', 'TIB']
    suffixes_list2 = ['KB', 'MB', 'GB', 'TB']
    expect_info = {}
    suffix = "B"
    factor = "1024"
    try:
        suffix = re.findall(r"[\s\d](\D+)", new_capacity, re.I)[-1].strip()
    except IndexError:
        raise error.TestError("Incorrect size format %s." % new_capacity)
    if suffix in suffixes_list1:
        factor = "1024"
    elif suffix in suffixes_list2:
        factor = "1000"
    else:
        raise error.TestError("Unsupport size unit '%s'." % suffix)

    try:
        # Transform the size to bytes
        new_size = utils_misc.normalize_data_size(new_capacity, "B", factor)

        # Get image info
        img_info = utils_misc.get_image_info(vol_path)

        # Init expect_info
        expect_info['Capacity'] = img_info['vsize']
        expect_info['Allocation'] = img_info['dsize']

        # Deal with resize options
        if not resize_option:
            expect_info['Capacity'] = int(float(new_size))
            return expect_info
        support_options = ["--allocate", "--delta", "--shrink"]
        find_delt = False
        find_allo = False
        for option in resize_option.split():
            logging.debug("Find '%s' in volume resize option", option)
            if option not in support_options:
                # Giving an invalid option is acceptable in the test, so just
                # output a debug log
                logging.debug("Invalid resize option: %s.", option)
                return expect_info
            if option == "--shrink":
                # vol-resize --shrink has a bug now, so output error
                logging.error("Shrink volume not support in this test.")
                return expect_info
            if option == "--allocate":
                find_allo = True
                logging.debug("Allocate the new capacity, rather than "
                              "leaving it sparse.")
            if option == "--delta":
                find_delt = True
                logging.debug("Use capacity as a delta to current size, "
                              "rather than the new size")
        if find_allo and find_delt:
            expect_info['Capacity'] += int(float(new_size))
            expect_info['Allocation'] += int(float(new_size))
        elif find_allo:
            expect_info['Capacity'] = int(float(new_size))
            expect_info['Allocation'] += int(
                float(new_size)) - img_info['vsize']
        elif find_delt:
            expect_info['Capacity'] += int(float(new_size))
        else:
            pass
        return expect_info
    except (IndexError, ValueError) as detail:
        raise error.TestError("Fail to get expect volume info:\n%s" % detail)
Code Example #25
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """

    def check_vm_state(vm, state):
        """
        Return True if vm is in the correct state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        if actual_state == state:
            return True
        else:
            return False

    def check_disks_in_vm(vm, vm_ip, disks_list=[], runner=None):
        """
        Check disks attached to vm.
        """
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s"
                      % fail_list)

    def get_disk_id(device):
        """
        Show disk by id.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id, shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {'remote_ip': remote_host,
                          'remote_user': remote_user,
                          'remote_pwd': remote_passwd}
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                 emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s"
                           % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, define a new vm for the test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            if not s_detach:
                test.error("Detach %s failed before test.", device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source, "vda",
                          attach_args, debug=True)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(vm.name, "sda", block_device,
                                                    params, config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)

        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip, username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
Code Example #26
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata "
                                    "is not supported.")

    # Using algorithms other than zero requires scrub to be installed.
    try:
        utils_misc.find_command('scrub')
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    del_pool = True
    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        if libv_pool.pool_exists(pool_name):
            logging.debug("Use existing pool '%s'", pool_name)
            del_pool = False
        else:
            # Create a new pool
            disk_vol = []
            if pool_type == 'disk':
                disk_vol.append(params.get("pre_vol", '10M'))
            libv_pvt.pre_pool(pool_name=pool_name,
                              pool_type=pool_type,
                              pool_target=pool_target,
                              emulated_image=emulated_image,
                              image_size=emulated_image_size,
                              pre_disk_vol=disk_vol)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        if libv_vol.volume_exists(vol_name):
            logging.debug("Use exist volume '%s'", vol_name)
        elif vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            # Create a new volume
            libv_pvt.pre_vol(vol_name=vol_name,
                             vol_format=vol_format,
                             capacity=vol_capability,
                             allocation=None,
                             pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(libv_vol.list_volumes().keys())[0]
            logging.debug("Partition %s in disk pool is used as the volume",
                          vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            utils.run(cmd)
        else:
            raise error.TestError("Unknown volume format %s" % vol_format)
        # Refresh the pool
        virsh.pool_refresh(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True

        if pool_type == "disk":
            new_vol_name = libvirt.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                libvirt.update_polkit_rule(params, vol_pat, new_value)
        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" %
                                     clone_result.stderr.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                unsupported_err = [
                    "Unsupported algorithm", "no such pattern sequence"
                ]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            raise error.TestNAError(wipe_result.stderr)
                        raise error.TestFail("Wipe volume fail:\n%s" %
                                             clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libv_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                            if qemu_vol_info['format'] != 'raw':
                                raise error.TestFail("Expect wiped volume "
                                                     "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run"
                                         " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run"
                                 " successfully.")
    finally:
        # Clean up
        try:
            if del_pool:
                libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                      emulated_image)
            else:
                # Only delete the volumes
                libv_vol = libvirt_storage.PoolVolume(pool_name)
                for vol in [vol_name, new_vol_name]:
                    libv_vol.delete_volume(vol)
        except error.TestFail as detail:
            logging.error(str(detail))
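
The wipe check above ultimately just asks qemu-img what format the volume now reports. A standalone sketch of that check with a hypothetical volume path (this helper is not part of the test above):

import json
import subprocess


def image_format(path):
    """Return the format reported by 'qemu-img info --output=json'."""
    out = subprocess.check_output(["qemu-img", "info", "--output=json", path])
    return json.loads(out)["format"]


# Hypothetical usage: a wiped volume is expected to read back as raw.
# assert image_format("/var/lib/libvirt/images/new_vol") == "raw"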
Code example #27
def get_expect_info(new_capacity, vol_path, resize_option=None):
    """
    Get the expected volume capacity and allocation size, for comparison
    after volume resize. Since virsh vol-info returns imprecise values, we
    need to get the volume info from the qemu side. The process is:
    1) Transform the new capacity size to bytes.
    2) Get the image info via qemu-img info (byte sizes).
    3) Calculate the expected info according to the volume resize option.

    :param new_capacity: New capacity for the vol, as scaled integer
    :param vol_path: Absolute path of volume
    :param resize_option: Options passed to the volume resize command
    :return: Expected volume capacity and allocation
    """
    if new_capacity.isdigit():
        # Default bytes
        new_capacity = new_capacity + "b"

    suffixes_list1 = ['B', 'K', 'KIB', 'M', 'MIB', 'G', 'GIB', 'T', 'TIB']
    suffixes_list2 = ['KB', 'MB', 'GB', 'TB']
    expect_info = {}
    suffix = "B"
    factor = "1024"
    try:
        suffix = re.findall(r"[\s\d](\D+)", new_capacity, re.I)[-1].strip()
    except IndexError:
        raise error.TestError("Incorrect size format %s." % new_capacity)
    if suffix in suffixes_list1:
        factor = "1024"
    elif suffix in suffixes_list2:
        factor = "1000"
    else:
        raise error.TestError("Unsupport size unit '%s'." % suffix)

    try:
        # Transform the size to bytes
        new_size = utils_misc.normalize_data_size(new_capacity, "B", factor)

        # Get image info
        img_info = utils_misc.get_image_info(vol_path)

        # Init expect_info
        expect_info['Capacity'] = img_info['vsize']
        expect_info['Allocation'] = img_info['dsize']

        # Deal with resize options
        if not resize_option:
            expect_info['Capacity'] = int(float(new_size))
            return expect_info
        support_options = ["--allocate", "--delta", "--shrink"]
        find_delt = False
        find_allo = False
        for option in resize_option.split():
            logging.debug("Find '%s' in volume resize option", option)
            if option not in support_options:
                # Giving an invalid option is acceptable in the test, so just
                # output a debug log
                logging.debug("Invalid resize option: %s.", option)
                return expect_info
            if option == "--shrink":
                # vol-resize --shrink has a bug now, so output an error
                logging.error("Shrink volume is not supported in this test.")
                return expect_info
            if option == "--allocate":
                find_allo = True
                logging.debug("Allocate the new capacity, rather than "
                              "leaving it sparse.")
            if option == "--delta":
                find_delt = True
                logging.debug("Use capacity as a delta to current size, "
                              "rather than the new size")
        if find_allo and find_delt:
            expect_info['Capacity'] += int(float(new_size))
            expect_info['Allocation'] += int(float(new_size))
        elif find_allo:
            expect_info['Capacity'] = int(float(new_size))
            expect_info['Allocation'] += int(float(new_size)) - img_info['vsize']
        elif find_delt:
            expect_info['Capacity'] += int(float(new_size))
        else:
            pass
        return expect_info
    except (IndexError, ValueError) as detail:
        raise error.TestError("Fail to get expect volume info:\n%s" % detail)
Code example #28
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri,
                                    params,
                                    session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(mount_point, session=session)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    virsh_options = params.get("virsh_options", "")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migrate_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remote_dargs = {
        'server_ip': server_ip,
        'server_user': server_user,
        'server_pwd': server_pwd,
        'file_path': "/etc/libvirt/libvirt.conf"
    }

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    # Make sure all of the parameters are assigned a valid value
    migrate_test = migration.MigrationTest()
    migrate_test.check_parameters(params)
    extra_args = migrate_test.update_virsh_migrate_extra_args(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (virsh_options, postcopy_options)
        func_name = virsh.migrate_postcopy

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local or sebool_fusefs_remote:
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()

        # Setup glusterfs
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params)
        logging.debug("host ip: %s ", host_ip)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_test.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # The package 'glusterfs-fuse' is not installed on the target,
            # which causes issues when trying to 'mount -t glusterfs'
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            # Get the image path
            image_source = vm.get_first_disk_devices()['source']
            image_info = utils_misc.get_image_info(image_source)
            if image_info["format"] == disk_format:
                disk_cmd = "cp -f %s %s" % (image_source, gluster_img)
            else:
                # Convert the disk format
                disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                            (image_info["format"], disk_format, image_source,
                             gluster_img))
            process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir),
                        shell=True)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        vm.wait_for_login().close()
        migrate_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        vms = [vm]
        migrate_test.do_migration(vms,
                                  None,
                                  dest_uri,
                                  'orderly',
                                  options,
                                  thread_timeout=900,
                                  ignore_status=True,
                                  virsh_opt=virsh_options,
                                  extra_opts=extra,
                                  **extra_args)
        migrate_test.ping_vm(vm, params, dest_uri)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)

            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd,
                                      params,
                                      runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.info("Recover test environment")
        migrate_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri,
                                           params,
                                           cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(
                    remote_session, pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
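
The copy-versus-convert decision in the gluster setup above can be isolated into a small helper. A hedged sketch (the helper name and the use of plain subprocess are assumptions, not part of the test):

import subprocess


def copy_or_convert(src, dst, src_format, dst_format="qcow2"):
    """Reuse the image as-is when formats match, otherwise convert it."""
    if src_format == dst_format:
        subprocess.check_call(["cp", "-f", src, dst])
    else:
        subprocess.check_call(["qemu-img", "convert", "-f", src_format,
                               "-O", dst_format, src, dst])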
Code example #29
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "virt-tests-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "500K")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy()
        vm.start()

    # Skip 'qed' cases for libvirt version 1.1.0 and later
    if libvirt_version.version_compare(1, 1, 0):
        if image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    status, output = commands.getstatusoutput(cmd)
    if status:
        raise error.TestError("Creating image file %s failed: %s" %
                              (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name,
                               source=image_path,
                               target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestError("Failed to attach disk %s to VM: %s." %
                              (image_path, result.stderr))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path, resize_value,
                                   **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect failure, but run successfully!")
            # No need to do more test
            return
        else:
            if status != 0 or err != "":
                # bz 1002813 will result in an error on this
                err_str = "unable to execute QEMU command 'block_resize': Could not resize: Invalid argument"
                if resize_value[-2] in "kb" and re.search(err_str, err):
                    raise error.TestNAError("BZ 1002813 not yet applied")
                else:
                    raise error.TestFail("Run failed with right "
                                         "virsh blockresize command")

        # Although kb should not be used, libvirt/virsh will accept it and
        # treat it as 1000 bytes, which caused issues for qed & qcow2
        # since they expect a value evenly divisible by 512 (hence bz 1002813).
        if "kb" in resize_value:
            value = int(resize_value[:-2])
            if image_format in ["qed", "qcow2"]:
                # qcow2 and qed want a VIR_ROUND_UP value based on 512 byte
                # sectors - hence this less than visually appealing formula
                expected_size = (((value * 1000) + 512 - 1) / 512) * 512
            else:
                # Raw images...
                # Ugh - there's some rather ugly looking math when kb
                # (or mb, gb, tb, etc.) is used as the scale for the
                # value to create an image. The blockresize for the
                # running VM uses a qemu json call which differs from
                # what qemu-img would do - resulting in (to say the least)
                # awkward sizes. We'll just have to make sure we don't
                # deviate by more than a sector.
                expected_size = value * 1000
        elif "kib" in resize_value:
            value = int(resize_value[:-3])
            expected_size = value * 1024
        elif resize_value[-1] in "b":
            expected_size = int(resize_value[:-1])
        elif resize_value[-1] in "k":
            value = int(resize_value[:-1])
            expected_size = value * 1024
        elif resize_value[-1] == "m":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024
        elif resize_value[-1] == "g":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024 * 1024
        else:
            raise error.TestError("Unknown scale value")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info(
            "The expected block size is %s bytes, "
            "the actual block size is %s bytes", expected_size, actual_size)

        # See comment above regarding Raw images
        if image_format == "raw" and resize_value[-2] in "kb":
            if abs(int(actual_size) - int(expected_size)) > 512:
                raise error.TestFail("New raw blocksize set by blockresize do "
                                     "not match the expected value")
        else:
            if int(actual_size) != int(expected_size):
                raise error.TestFail("New blocksize set by blockresize is "
                                     "different from actual size from "
                                     "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
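
A small worked sketch of the rounding arithmetic above, for a hypothetical "10kb" resize of a qcow2 disk (illustrative numbers only): libvirt treats "kb" as 1000 bytes and qcow2 rounds the result up to the next 512-byte sector.

value = 10                                            # "10kb"
requested = value * 1000                              # 10000 bytes
expected_size = ((requested + 512 - 1) // 512) * 512  # round up to a sector
assert expected_size == 10240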
Code example #30
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support", "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write data to file
            write_file(file_path)

            # Set length to calculate offset + length in the following
            # functions get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get the pre-region and post-region digests when offset and length are set
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s" %
                          (operation, result.stderr))
            # Compare the change part on volume and file
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed", operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)

            # Create one disk xml and attach it to VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file',
                                          'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()

            # Write 100M data into disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Download volume to local with sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)

            # Check download image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
               download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))

            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Do volume upload, uploading the sparse file downloaded last time.
            result = virsh.vol_upload(upload_sparse_file, download_file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
               upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
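
The write_file() and digest() helpers used above are defined elsewhere in the test module and are not shown in this excerpt. A minimal sketch of what digest() is expected to do, hashing a byte region of a file (length == 0 meaning "read to EOF"):

import hashlib


def digest(path, offset, length):
    """md5 of a byte region of a file; length == 0 means 'until EOF'."""
    md5 = hashlib.md5()
    with open(path, "rb") as handle:
        handle.seek(offset)
        data = handle.read(length) if length else handle.read()
        md5.update(data)
    return md5.hexdigest()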
Code example #31
    exception = False
    try:
        # Change the disk of the vm to shared disk
        if vm.is_alive():
            vm.destroy(gracefully=False)

        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm_name,
                                         device,
                                         "--config",
                                         debug=True)
            if not s_detach:
                logging.error("Detach vda failed before test.")

        subdriver = utils_misc.get_image_info(shared_storage)['format']
        extra_attach = ("--config --driver qemu --subdriver %s --cache none" %
                        subdriver)
        s_attach = virsh.attach_disk(vm_name,
                                     shared_storage,
                                     "vda",
                                     extra_attach,
                                     debug=True)
        if s_attach.exit_status != 0:
            logging.error("Attach vda failed before test.")

        vm.start()
        vm.wait_for_login()

        # Confirm VM can be accessed through network.
        time.sleep(delay)
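
The subdriver detection above hinges on a single qemu-img lookup. A standalone sketch of the same idea outside the avocado helpers (the function name is assumed):

import json
import subprocess


def attach_disk_extra(shared_storage):
    """Build the 'virsh attach-disk' extra options from the on-disk format."""
    info = json.loads(subprocess.check_output(
        ["qemu-img", "info", "--output=json", shared_storage]))
    return "--config --driver qemu --subdriver %s --cache none" % info["format"]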
Code example #32
File: blockcopy.py Project: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    case = params.get('case', '')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    libvirt_version.is_libvirt_feature_supported(params)

    file_to_del = []
    tmp_dir = data_dir.get_data_dir()

    try:
        if case:
            if case == 'reuse_external':
                # Create a transient vm for test
                vm.undefine()
                virsh.create(vmxml.xml)

                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('Not found any disk file in vm.')
                image_file = all_disks[0].find('source').get('file')
                disk_dev = all_disks[0].find('target').get('dev')
                logging.debug('Image file of vm: %s', image_file)

                # Get image info
                image_info = utils_misc.get_image_info(image_file)
                logging.info('Image info: %s', image_info)

                # Get Virtual size of the image file
                vsize = image_info['vsize'] / 1073741824.0
                logging.info('Virtual size of image file: %f', vsize)

                new_image_size = vsize
                image_dir = '/'.join(image_file.split('/')[:-1])
                new_image_path = os.path.join(
                    image_dir,
                    'new_image_' + utils_misc.generate_random_string(3))
                file_to_del.append(new_image_path)

                # Create new image file
                cmd_image_create = 'qemu-img create -f qcow2 %s %fG' % (
                    new_image_path, new_image_size)
                process.run(cmd_image_create, shell=True, verbose=True)

                # Do blockcopy with --reuse-external option
                virsh.blockcopy(vm_name,
                                disk_dev,
                                new_image_path,
                                options='--verbose --wait --reuse-external',
                                debug=True,
                                ignore_status=False)
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--pivot',
                               debug=True,
                               ignore_status=False)
                logging.debug('Current vm xml: %s', vmxml)

                # Current disk source file should be new image
                cur_disks = vmxml.get_disk_source(vm_name)
                cur_sfile = cur_disks[0].find('source').get('file')
                logging.debug('Now disk source file is: %s', cur_sfile)
                if cur_sfile.strip() != new_image_path:
                    test.fail('Disk source file is not updated.')
            if case == 'custom_cluster_size':

                def update_vm_with_cluster_disk():
                    """
                    Update vm's first disk with a image which has customized
                    cluster size

                    :return: The source image params
                    """
                    source_img_params = params.copy()
                    source_img_params['image_name'] = params.get(
                        'source_image_name', 'source_image')
                    source_img = qemu_storage.QemuImg(source_img_params,
                                                      tmp_dir, '')
                    source_img_path, _ = source_img.create(source_img_params)
                    file_to_del.append(source_img_path)
                    source_img_params['disk_source_name'] = source_img_path
                    libvirt.set_vm_disk(vm, source_img_params)
                    return source_img_params

                source_img_params = update_vm_with_cluster_disk()
                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('Not found any disk file in vm.')
                disk_dev = all_disks[0].find('target').get('dev')

                # Blockcopy the source image to the target image path
                target_img_params = source_img_params.copy()
                target_img_name = params.get('target_image_name',
                                             'target_image')
                target_img_params['image_name'] = target_img_name
                target_img_path = os.path.join(tmp_dir,
                                               target_img_name + '.qcow2')
                file_to_del.append(target_img_path)
                virsh.blockcopy(vm_name,
                                disk_dev,
                                target_img_path,
                                options='--verbose --wait --transient-job',
                                debug=True,
                                ignore_status=False)
                target_img = qemu_storage.QemuImg(target_img_params, tmp_dir,
                                                  '')
                target_img_info = json.loads(
                    target_img.info(force_share=True, output='json'))

                # Compare the source and target images' cluster size
                source_img_cluster = str(
                    source_img_params.get('image_cluster_size'))
                target_img_cluster = str(target_img_info['cluster-size'])
                if source_img_cluster != target_img_cluster:
                    test.fail("Images have different cluster size:\n"
                              "Source image cluster size: %s\n"
                              "Target image cluster size: %s" %
                              (source_img_cluster, target_img_cluster))

                # Abort the blockcopy job
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--abort',
                               debug=True,
                               ignore_status=False)

    finally:
        if case == 'reuse_external':
            # Recover vm and remove the transient vm
            virsh.destroy(vm_name, debug=True)
            virsh.define(bkxml.xml, debug=True)
        bkxml.sync()

        # Remove files to be deleted
        if file_to_del:
            for item in file_to_del:
                if os.path.exists(item):
                    os.remove(item)
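
In the custom_cluster_size case above, the comparison relies on qemu-img reporting the qcow2 cluster size. A hedged standalone sketch of that lookup (paths are assumed; --force-share allows inspecting an image that is still in use):

import json
import subprocess


def cluster_size(path):
    """Return the 'cluster-size' reported by 'qemu-img info --output=json'."""
    info = json.loads(subprocess.check_output(
        ["qemu-img", "info", "--output=json", "--force-share", path]))
    return info.get("cluster-size")


# Hypothetical comparison of the source and the blockcopy target:
# assert cluster_size(source_img_path) == cluster_size(target_img_path)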
Code example #33
File: nbdfuse.py Project: xiaodwan/tp-libvirt
def run(test, params, env):
    """
    Use qemu-nbd to read and modify a qcow2 file

    1) qemu-img create test.qcow2 -f qcow2 1G
    2) nbdfuse mountpoint/nbdtest/ --socket-activation qemu-nbd -f qcow2 test.qcow2
    3) check image format is 'raw' in mountpoint/nbdtest/
    4) fusermount -u mountpoint/nbdtest/
    """
    image_qcow2_size = params.get('image_qcow2_size', '512M')
    image_qcow2_path = params.get('image_qcow2_path')

    nbdfuse_mp_filename = params.get('nbdfuse_mp_filename', '')

    try:
        temp_dir = data_dir.get_tmp_dir()
        # mountpoint of nbdfuse
        nbdfuse_mp = os.path.join(temp_dir, "nbdfuse_mp")
        if not os.path.exists(nbdfuse_mp):
            os.makedirs(nbdfuse_mp)

        if not image_qcow2_path:
            image_qcow2_path = os.path.join(temp_dir, "nbdfuse_test.qcow2")
        nbdfuse_mp_filename = os.path.join(nbdfuse_mp, nbdfuse_mp_filename)

        # Prepare a qcow2 image
        cmd = "qemu-img create %s -f qcow2 %s" % (image_qcow2_path,
                                                  image_qcow2_size)
        process.run(cmd, verbose=True, ignore_status=False, shell=True)
        image_info_qcow2 = utils_misc.get_image_info(image_qcow2_path)

        # Must have the '&' at the end
        nbdfuse_cmd = "nbdfuse %s --socket-activation qemu-nbd -f qcow2 %s &" % (
            nbdfuse_mp_filename, image_qcow2_path)
        # Must set ignore_bg_processes=True because nbdfuse keeps serving like
        # a daemon in the background
        # Must set shell=True
        process.run(nbdfuse_cmd,
                    verbose=True,
                    ignore_status=True,
                    shell=True,
                    ignore_bg_processes=True)

        # A protective sleep because the above command was run in the background
        time.sleep(3)
        # If nbdfuse_mp_filename is '', change it to nbdfuse's default name
        # 'nbd'
        if nbdfuse_mp_filename.rstrip(os.sep) == nbdfuse_mp.rstrip(os.sep):
            nbdfuse_mp_filename = os.path.join(nbdfuse_mp_filename, 'nbd')

        image_info_raw = utils_misc.wait_for(
            lambda: utils_misc.get_image_info(nbdfuse_mp_filename), timeout=60)

        if not image_info_raw or image_info_raw[
                'format'] != 'raw' or image_info_raw[
                    'vsize'] != image_info_qcow2['vsize']:
            test.fail("nbdfuse test failed: %s" % image_info_raw)
    finally:
        nbdfuse_umount_cmd = "fusermount -u %s" % nbdfuse_mp
        process.run(nbdfuse_umount_cmd,
                    verbose=True,
                    ignore_status=True,
                    shell=True)

        if os.path.exists(image_qcow2_path):
            os.unlink(image_qcow2_path)
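
Step 3 of the scenario above boils down to two qemu-img lookups: the file exposed by nbdfuse should look raw and keep the qcow2 image's virtual size. A minimal sketch of that check (paths are assumed):

import json
import subprocess


def fuse_export_matches(qcow2_path, fuse_path):
    def info(path):
        return json.loads(subprocess.check_output(
            ["qemu-img", "info", "--output=json", path]))
    qcow2_info, raw_info = info(qcow2_path), info(fuse_path)
    return (raw_info["format"] == "raw" and
            raw_info["virtual-size"] == qcow2_info["virtual-size"])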
Code example #34
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata " "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and not libvirt_version.version_compare(
                        6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name), encryption_password,
                    test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                unsupported_err = [
                    "Unsupported algorithm", "no such pattern sequence"
                ]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                            if qemu_vol_info['format'] != 'raw':
                                test.fail("Expect wiped volume "
                                          "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run" " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name,
                                         pool_name,
                                         alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri,
                                         debug=True)
            unsupported_err = [
                "Unsupported algorithm", "no such pattern sequence"
            ]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              clone_result.stdout.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s", key,
                                      virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s", key,
                                      qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume " "format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run" " successfully.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)

        except exceptions.TestFail as detail:
            logging.error(str(detail))
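
上記のスニペットでは、クローンしたボリュームと（LUKS の場合は）元のボリュームに対して同じ qemu-img 検証を繰り返しています。以下は、その検証部分を共通化するとしたらどうなるかを示す最小スケッチです。ヘルパー名と配置は仮のものであり、実際のテストファイルには存在しません。import はスニペットと同じ virttest モジュールを仮定しています。

import logging

from virttest import libvirt_storage, utils_misc, virsh


def check_wiped_vol(test, pool_name, vol_name):
    """Hypothetical helper: verify a wiped volume via virsh and qemu-img."""
    virsh_info = libvirt_storage.PoolVolume(pool_name).volume_info(vol_name)
    for key in virsh_info:
        logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_info[key])
    vol_path = virsh.vol_path(vol_name, pool_name).stdout.strip()
    qemu_info = utils_misc.get_image_info(vol_path)
    for key in qemu_info:
        logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_info[key])
    # vol-wipe rewrites the payload, so qemu-img should no longer detect
    # the original format and should report the image as raw.
    if qemu_info['format'] != 'raw':
        test.fail("Expected wiped volume format to be raw, got %s"
                  % qemu_info['format'])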
コード例 #35
0
ファイル: virsh_migrate.py プロジェクト: FengYang/tp-libvirt
        raise error.TestError("For migration you need to have a shared "
                              "storage.")

    exception = False
    try:
        # Change the disk of the vm to shared disk
        if vm.is_alive():
            vm.destroy(gracefully=False)

        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm_name, device, "--config", debug=True)
            if not s_detach:
                logging.error("Detach %s failed before test.", device)

        subdriver = utils_misc.get_image_info(shared_storage)['format']
        extra_attach = ("--config --driver qemu --subdriver %s --cache none"
                        % subdriver)
        s_attach = virsh.attach_disk(vm_name, shared_storage, "vda",
                                     extra_attach, debug=True)
        if s_attach.exit_status != 0:
            logging.error("Attach vda failed before test.")

        vm.start()
        vm.wait_for_login()

        # Confirm VM can be accessed through network.
        time.sleep(delay)
        vm_ip = vm.get_address()
        s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=delay)
        logging.info(o_ping)
コード例 #36
0
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the newly cloned volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-clone", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata " "is not supported.")

    # Using algorithms other than zero requires the scrub binary installed.
    try:
        utils_misc.find_command("scrub")
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get("unprivileged_user")
    if unpri_user:
        if unpri_user.count("EXAMPLE"):
            unpri_user = "******"

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current" " libvirt version.")

    del_pool = True
    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        if libv_pool.pool_exists(pool_name):
            logging.debug("Use exist pool '%s'", pool_name)
            del_pool = False
        else:
            # Create a new pool
            disk_vol = []
            if pool_type == "disk":
                disk_vol.append(params.get("pre_vol", "10M"))
            libv_pvt.pre_pool(
                pool_name=pool_name,
                pool_type=pool_type,
                pool_target=pool_target,
                emulated_image=emulated_image,
                image_size=emulated_image_size,
                pre_disk_vol=disk_vol,
            )
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        if libv_vol.volume_exists(vol_name):
            logging.debug("Use exist volume '%s'", vol_name)
        elif vol_format in ["raw", "qcow2", "qed", "vmdk"]:
            # Create a new volume
            libv_pvt.pre_vol(
                vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name
            )
        elif vol_format == "partition":
            vol_name = libv_vol.list_volumes().keys()[0]
            logging.debug("Partition %s in disk pool is volume" % vol_name)
        elif vol_format == "sparse":
            # Create a sparse file in pool
            sparse_file = pool_target + "/" + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            utils.run(cmd)
        else:
            raise error.TestError("Unknown volume format %s" % vol_format)
        # Refresh the pool
        virsh.pool_refresh(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True

        if pool_type == "disk":
            new_vol_name = libvirt.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                libvirt.update_polkit_rule(params, vol_pat, new_value)
        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" % clone_result.stderr.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key, vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(
                    new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True
                )
                unsupported_err = ["Unsupported algorithm", "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr for err in unsupported_err):
                            raise error.TestNAError(wipe_result.stderr)
                        raise error.TestFail("Wipe volume fail:\n%s" % wipe_result.stderr.strip())
                    else:
                        virsh_vol_info = libv_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key])
                            if qemu_vol_info["format"] != "raw":
                                raise error.TestFail("Expect wiped volume " "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run" " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run" " successfully.")
    finally:
        # Clean up
        try:
            if del_pool:
                libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
            else:
                # Only delete the volumes
                libv_vol = libvirt_storage.PoolVolume(pool_name)
                for vol in [vol_name, new_vol_name]:
                    libv_vol.delete_volume(vol)
        except error.TestFail, detail:
            logging.error(str(detail))
コード例 #37
0
            if os.path.exists(clone_image):
                os.remove(clone_image)
        except error.CmdError, detail:
            raise error.TestFail("Clean clone guest failed!:%s" % detail)

    sysprep_type = params.get("sysprep_type", 'clone')
    sysprep_target = params.get("sysprep_target", 'guest')
    sysprep_hostname = params.get("sysprep_hostname", 'sysprep_test')
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    disks = vm.get_disk_devices()
    if len(disks):
        disk = disks.values()[0]
        image = disk['source']
        target = disks.keys()[0]
        image_info_dict = utils_misc.get_image_info(image)
        if sysprep_type == "sparsify" and image_info_dict['format'] != 'qcow2':
            raise error.TestNAError("This test case needs qcow2 format image.")
    else:
        raise error.TestError("Can not get disk of %s" % vm_name)

    # Do some prepare action
    vm_clone_name = "%s_clone" % vm_name
    clone_image = "%s_clone.img" % image
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    first_nic = vmxml.get_devices(device_type="interface")[0]
    clean_clone_vm()

    # Clone guest to guest_clone
    dargs = {}
    dargs['files'] = [clone_image]
コード例 #38
0
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "virt-tests-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "500K")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}

    # Skip 'qed' cases for libvirt version 1.1.0 and newer
    if libvirt_version.version_compare(1, 1, 0):
        if image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    status, output = commands.getstatusoutput(cmd)
    if status:
        raise error.TestError("Creating image file %s failed: %s"
                              % (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name, source=image_path, target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestError("Failed to attach disk %s to VM: %s."
                              % (image_path, result.stderr))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path,
                                   resize_value, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect failure, but run successfully!")
            # No need to do more test
            return
        else:
            if status != 0 or err != "":
                # bz 1002813 will result in an error on this
                err_str = "unable to execute QEMU command 'block_resize': Could not resize: Invalid argument"
                if resize_value[-2] in "kb" and re.search(err_str, err):
                    raise error.TestNAError("BZ 1002813 not yet applied")
                else:
                    raise error.TestFail("Run failed with right "
                                         "virsh blockresize command")

        # Although kb should not be used, libvirt/virsh will accept it and
        # consider it as 1000 bytes, which caused issues for qed & qcow2
        # since they expect a value evenly divisible by 512 (hence bz 1002813).
        if "kb" in resize_value:
            value = int(resize_value[:-2])
            if image_format in ["qed", "qcow2"]:
                # qcow2 and qed want a VIR_ROUND_UP value based on 512 byte
                # sectors - hence this less than visually appealing formula
                expected_size = (((value * 1000) + 512 - 1) / 512) * 512
            else:
                # Raw images...
                # Ugh - there's some rather ugly looking math when kb
                # (or mb, gb, tb, etc.) are used as the scale for the
                # value to create an image. The blockresize for the
                # running VM uses a qemu json call which differs from what
                # qemu-img would do - resulting in (to say the least)
                # awkward sizes. We'll just have to make sure we don't
                # deviate by more than a sector.
                expected_size = value * 1000
        elif "kib" in resize_value:
            value = int(resize_value[:-3])
            expected_size = value * 1024
        elif resize_value[-1] in "b":
            expected_size = int(resize_value[:-1])
        elif resize_value[-1] in "k":
            value = int(resize_value[:-1])
            expected_size = value * 1024
        elif resize_value[-1] == "m":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024
        elif resize_value[-1] == "g":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024 * 1024
        else:
            raise error.TestError("Unknown scale value")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info("The expected block size is %s bytes, "
                     "the actual block size is %s bytes",
                     expected_size, actual_size)

        # See comment above regarding Raw images
        if image_format == "raw" and resize_value[-2] in "kb":
            if abs(int(actual_size) - int(expected_size)) > 512:
                raise error.TestFail("New raw blocksize set by blockresize do "
                                     "not match the expected value")
        else:
            if int(actual_size) != int(expected_size):
                raise error.TestFail("New blocksize set by blockresize is "
                                     "different from actual size from "
                                     "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
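
このテストの要点はサイズの計算にあります。libvirt は 'kb' を 1000 バイトとして受け取る一方、qcow2/qed は要求サイズを 512 バイト境界に切り上げます（bz 1002813 参照）。以下は、その期待サイズ計算を例示するだけの小さなスケッチです。値は説明用の仮のものです。

# Illustrative only: expected size when resizing a qcow2 disk to "100kb".
value = 100
requested = value * 1000                              # libvirt's 'kb' means 1000 bytes
expected_size = ((requested + 512 - 1) // 512) * 512  # VIR_ROUND_UP to 512-byte sectors
assert expected_size == 100352

# After blockresize, the test compares this against the virtual size that
# qemu-img info reports (raw images are allowed to drift by one sector):
# actual_size = int(utils_misc.get_image_info(image_path)['vsize'])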
コード例 #39
0
ファイル: virsh_volume.py プロジェクト: PandaWei/tp-libvirt
    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0
        volume_xml = {}
        (isavail, actual_list) = get_vol_list(expected['pool_name'],
                                              expected['name'])
        actual_info = get_vol_info(expected['pool_name'],
                                   expected['name'])
        if not avail:
            if isavail:
                error_count += 1
                logging.error("Deleted vol: %s is still shown in vol-list",
                              expected['name'])
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count
        else:
            if not isavail:
                logging.error("Volume list does not show volume %s",
                              expected['name'])
                logging.error("Volume creation failed")
                error_count += 1

        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.get_vol_details_by_name(expected['name'],
                                                            expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml['key']:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\n Key from command: %s", expected['name'], volume_xml['key'], vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\n Output of vol-name: %s",
                          expected['name'], get_vol_name)

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\n Output of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if isavail:
            if expected['path'] != actual_list['path']:
                logging.error("Volume path mismatch for volume:%s\n"
                              "Expected Path: %s\n Path from virsh vol-list: %s", expected[
                                  'name'], expected['path'],
                              actual_list['path'])
                error_count += 1
            else:
                logging.debug("Path of volume: %s from virsh vol-list "
                              "successfully checked against created "
                              "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml['path']:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\n Path from virsh vol-dumpxml: %s", expected['name'], expected['path'], volume_xml['path'])
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if isavail:
            if expected['type'] != actual_list['type']:
                logging.error("Volume type mismatch for volume: %s\n"
                              "Expected Type: %s\n Type from vol-list: %s",
                              expected['name'],
                              expected['type'], actual_list['type'])
                error_count += 1
            else:
                logging.debug("Type of volume: %s from virsh vol-list "
                              "successfully checked against the created "
                              "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format'] != img_info['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n Format from qemu-img info: %s",
                          expected['name'],
                          expected['format'], img_info['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from qemu-img info checked "
                          "successfully against the created volume format",
                          expected['name'])

        # Check format against vol-dumpxml
        if expected['format'] != volume_xml['format']:
            logging.error("Volume format mismatch for volume: %s\n"
                          "Expected format: %s\n Format from vol-dumpxml: %s",
                          expected['name'],
                          expected['format'], volume_xml['format'])
            error_count += 1
        else:
            logging.debug("Format of volume: %s from virsh vol-dumpxml checked"
                          " successfully against the created volume format",
                          expected['name'])

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml['capacity']
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        if expected['capacity'] != norm_cap['list']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['info']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if expected['capacity'] != norm_cap['xml']:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if expected['capacity'] != norm_cap['qemu_img']:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])

        return error_count
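
check_vol は容量を norm_capacity に通してから比較しますが、このヘルパーは virsh_volume.py の別の箇所で定義されており、この一覧には含まれていません。以下は、そのような正規化処理がどう見えるかを示すだけの仮のスケッチです。vol-list / vol-info が "1.00 GiB" のような単位付き表記を返し、vol-dumpxml と qemu-img info はバイト数をそのまま返す、という前提を置いています。

def norm_capacity(capacity):
    """Hypothetical sketch, not the real helper: map every reported
    capacity to an integer byte count so the sources can be compared."""
    units = {'B': 1, 'KiB': 1024, 'MiB': 1024 ** 2,
             'GiB': 1024 ** 3, 'TiB': 1024 ** 4}
    normalized = {}
    for source, value in capacity.items():
        try:
            normalized[source] = int(value)        # already a plain byte count
        except (TypeError, ValueError):
            number, unit = str(value).split()      # e.g. "1.00 GiB"
            normalized[source] = int(float(number) * units[unit])
    return normalized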
コード例 #40
0
    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(
            expected['name'], expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error(
                "Volume key is mismatch \n%s"
                "Key from xml: %s\nKey from command: %s", expected['name'],
                volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-key for volume: %s successfully"
                " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error(
                "Volume name mismatch\n"
                "Expected name: %s\nOutput of vol-name: %s", expected['name'],
                get_vol_name)

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected path: %s\nOutput of vol-path: %s\n",
                expected['name'], expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-path for volume: %s successfully checked"
                " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error(
                "Volume path mismatch for volume:%s\n"
                "Expected Path: %s\nPath from virsh vol-list: %s",
                expected['name'], expected['path'], actual_list['path'])
            error_count += 1
        else:
            logging.debug(
                "Path of volume: %s from virsh vol-list "
                "successfully checked against created "
                "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug(
                "Path of volume: %s from virsh vol-dumpxml "
                "successfully checked against created volume path",
                expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-list: %s", expected['name'],
                expected['type'], actual_list['type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-list "
                "successfully checked against the created "
                "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-info: %s", expected['name'],
                expected['type'], actual_info['Type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-info successfully"
                " checked against the created volume type", expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error(
                "Volume name mismatch for volume: %s\n"
                "Expected name: %s\n Name from vol-info: %s", expected['name'],
                expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug(
                "Name of volume: %s from virsh vol-info successfully"
                " checked against the created volume name", expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from qemu-img info: %s", expected['name'],
                    expected['format'], img_info['format'])
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from qemu-img info "
                    "checked successfully against the created "
                    "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from vol-dumpxml: %s", expected['name'],
                    expected['format'], volume_xml.format)
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from virsh vol-dumpxml "
                    "checked successfully against the created"
                    " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # As the 'default' format will change to a specific value (qcow),
            # just output it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error(
                        "Encryption secret mismatch for volume: %s\n"
                        "Expected secret uuid: %s\n"
                        "Secret uuid from vol-dumpxml: %s", expected['name'],
                        expected['encrypt_secret'], secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, one is
                    # generated automatically at volume creation time
                    logging.debug("Volume encryption secret is %s",
                                  secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error(
                "Pool name mismatch for volume: %s against"
                "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug(
                "Pool name of volume: %s checked successfully"
                " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-list\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['list'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['info'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['xml'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-dumpxml for volume: %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against "
                "qemu-img info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " qemu-img info for volume: %s", expected['name'])
        return error_count
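
先の check_vol と異なり、このバリアントは容量比較に delta_size（既定 1024 バイト）の許容差を持たせています。プールのバックエンドや取得元によって割り当てが多少丸められるためです。同じ許容判定を一つの仮のヘルパーとして書くと次のようになります。

def capacity_matches(expected_bytes, reported_bytes, delta_size=1024):
    # Treat capacities as equal when they differ by at most delta_size bytes.
    return abs(int(expected_bytes) - int(reported_bytes)) <= delta_size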
コード例 #41
0
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the newly cloned volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False

    if virsh.has_command_help_match("vol-clone", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata "
                        "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero requires the scrub binary installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                luks_sec_uuid = create_luks_secret(os.path.join(pool_target,
                                                                vol_name),
                                                   encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  wipe_result.stderr.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                            if qemu_vol_info['format'] != 'raw':
                                test.fail("Expect wiped volume "
                                          "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              wipe_result.stderr.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume "
                                      "format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run"
                          " successfully.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)

        except exceptions.TestFail as detail:
            logging.error(str(detail))
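
この一覧のスニペットはいずれも、qemu-img info の出力を辞書として返す utils_misc.get_image_info に依存しています。以下は単体での利用例のスケッチです。一時ファイルのパスは任意で、ここで参照するのは上記スニペットが実際に読んでいる 'format' と 'vsize' キーのみを前提としています。

import logging
import os

from avocado.utils import process
from virttest import utils_misc

demo_img = "/tmp/get_image_info_demo.qcow2"         # arbitrary scratch path
process.run("qemu-img create -f qcow2 %s 10M" % demo_img, shell=True)

info = utils_misc.get_image_info(demo_img)
logging.debug("Parsed qemu-img info: %s", info)
assert info['format'] == 'qcow2'
assert int(info['vsize']) == 10 * 1024 * 1024       # virtual size in bytes

os.remove(demo_img)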
コード例 #42
0
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with
    TLS encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using nfs type backing_file

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name, is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format,
                     blk_source, mnt_path, backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)
        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({'disk_source_name': disk_img,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        func_name = migration_test.do_cancel

    # For safety reasons, back up the original XML file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)

        if extra.count("copy-storage-all") and precreation:
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    extra_opts=extra, func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            logging.debug("Sleeping 10 seconds before rerun migration")
            time.sleep(10)
            if cancel_migration:
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra, func=func_name,
                                        **extra_args)

            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=daemon_conf)
        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)

        if remote_session:
            remote_session.close()
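
Note: before a "--copy-storage-all" migration, the test above sizes the destination disk from the source image's virtual size reported by utils_misc.get_image_info() and pre-creates it on the remote host with qemu-img. Below is a minimal local sketch of that idea, assuming get_image_info() returns the size in bytes under the 'vsize' key; the image paths are hypothetical.

import subprocess

from virttest import utils_misc

src_image = "/var/lib/libvirt/images/demo.qcow2"    # hypothetical source image
vsize = utils_misc.get_image_info(src_image).get("vsize")

# Pre-create an empty image of the same virtual size so that
# "virsh migrate --copy-storage-all" has a target to copy into.
subprocess.run(
    ["qemu-img", "create", "-f", "qcow2", "/tmp/precreated.qcow2", str(vsize)],
    check=True,
)
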
コード例 #43
0
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    case = params.get('case', '')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    file_to_del = []

    try:
        if case:
            if case == 'reuse_external':
                # Create a transient vm for test
                vm.undefine()
                virsh.create(vmxml.xml)

                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('No disk file found in the vm.')
                image_file = all_disks[0].find('source').get('file')
                disk_dev = all_disks[0].find('target').get('dev')
                logging.debug('Image file of vm: %s', image_file)

                # Get image info
                image_info = utils_misc.get_image_info(image_file)
                logging.info('Image info: %s', image_info)

                # Get Virtual size of the image file
                vsize = image_info['vsize'] / 1073741824.0
                logging.info('Virtual size of image file: %f', vsize)

                new_image_size = vsize
                image_dir = '/'.join(image_file.split('/')[:-1])
                new_image_path = os.path.join(
                    image_dir,
                    'new_image_' + utils_misc.generate_random_string(3))
                file_to_del.append(new_image_path)

                # Create new image file
                cmd_image_create = 'qemu-img create -f qcow2 %s %fG' % (
                    new_image_path, new_image_size)
                process.run(cmd_image_create, shell=True, verbose=True)

                # Do blockcopy with --reuse-external option
                virsh.blockcopy(vm_name,
                                disk_dev,
                                new_image_path,
                                options='--verbose --wait --reuse-external',
                                debug=True,
                                ignore_status=False)
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--pivot',
                               debug=True,
                               ignore_status=False)
                logging.debug('Current vm xml: %s', vmxml)

                # Current disk source file should be new image
                cur_disks = vmxml.get_disk_source(vm_name)
                cur_sfile = cur_disks[0].find('source').get('file')
                logging.debug('Now disk source file is: %s', cur_sfile)
                if cur_sfile.strip() != new_image_path:
                    test.fail('Disk source file is not updated.')

    finally:
        if case == 'reuse_external':
            # Recover vm and remove the transient vm
            virsh.destroy(vm_name, debug=True)
            virsh.define(bkxml.xml, debug=True)
        bkxml.sync()

        # Remove files to be deleted
        if file_to_del:
            for item in file_to_del:
                if os.path.exists(item):
                    os.remove(item)
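
Note: the example above converts the 'vsize' reported by utils_misc.get_image_info() from bytes to GiB (1 GiB = 1024**3 = 1073741824 bytes) so the image pre-created for "--reuse-external" matches the source's virtual size. A small, self-contained illustration of that arithmetic, assuming a 10 GiB source image:

vsize_bytes = 10 * 1024 ** 3                 # assumed 'vsize' of a 10 GiB image
new_image_size = vsize_bytes / 1073741824.0  # bytes -> GiB
assert new_image_size == 10.0
print('qemu-img create -f qcow2 /tmp/new_image_abc %fG' % new_image_size)
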
コード例 #44
0
ファイル: virt_sysprep.py プロジェクト: FengYang/tp-libvirt
                os.remove(clone_image)
        except error.CmdError as detail:
            raise error.TestFail("Clean clone guest failed!:%s" % detail)

    sysprep_type = params.get("sysprep_type", 'clone')
    sysprep_target = params.get("sysprep_target", 'guest')
    sysprep_hostname = params.get("sysprep_hostname", 'sysprep_test')
    vm_name = params.get("main_vm", "virt-tests-vm1")
    file_system = params.get("sysprep_file_system", "ext3")
    vm = env.get_vm(vm_name)
    disks = vm.get_disk_devices()
    if len(disks):
        disk = disks.values()[0]
        image = disk['source']
        target = disks.keys()[0]
        image_info_dict = utils_misc.get_image_info(image)
        if sysprep_type == "sparsify" and image_info_dict['format'] != 'qcow2':
            raise error.TestNAError("This test case needs qcow2 format image.")
    else:
        raise error.TestError("Can not get disk of %s" % vm_name)
    vt = libguestfs.VirtTools(vm, params)
    fs_type = vt.get_primary_disk_fs_type()
    if fs_type != file_system:
        raise error.TestNAError("This test case gets wrong disk file system."
                                "get: %s, expected: %s" % (fs_type,
                                                           file_system))

    # Do some prepare action
    vm_clone_name = "%s_clone" % vm_name
    clone_image = "%s_clone.img" % image
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
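
Note: the fragment above only runs virt-sparsify against qcow2 guests, using the 'format' key from utils_misc.get_image_info() as the gate. A minimal stand-alone sketch of that guard, with a hypothetical image path:

from virttest import utils_misc

image = "/var/lib/libvirt/images/guest.img"   # hypothetical path
if utils_misc.get_image_info(image).get('format') != 'qcow2':
    print("Skipping sparsify: this check expects a qcow2 image")
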
コード例 #45
0
ファイル: virsh_volume.py プロジェクト: Antique/tp-libvirt
    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name)
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # The 'default' encryption format is converted to a specific
            # value (qcow), so just log it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, one is generated
                    # automatically at volume creation time
                    logging.debug("Volume encryption secret is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count
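
Note: the capacity checks above tolerate small differences between the values reported by vol-list, vol-info, vol-dumpxml and qemu-img info. A compact illustration of that tolerance comparison, with assumed numbers (a 1 GiB volume and the default 1024-byte delta):

expected_capacity = 1073741824               # assumed 1 GiB volume
reported = {'list': 1073741824, 'info': 1073741824,
            'xml': 1073741824, 'qemu_img': 1073742336}
delta_size = 1024
mismatches = {k: v for k, v in reported.items()
              if abs(expected_capacity - v) > delta_size}
print(mismatches or "all capacity values within tolerance")
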
コード例 #46
0
ファイル: blockcommand.py プロジェクト: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('No disk file found in the vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate fail
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create logical device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Wrong disk source, unsupported by this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image)
        if disk_type == 'block' and driver_type == 'raw':
            pass
        else:
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if not disk_type == 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        virsh_blk_cmd = eval('virsh.%s' % blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += ' --base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            opts += opt_top + opt_base + (' --active' if blk_top == 0 else '')

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        if check_bc_func_name in globals():
            check_bc = eval(check_bc_func_name)
            if not callable(check_bc):
                logging.warning('Function "%s" is not callable.', check_bc_func_name)
            elif not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented.', check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.run(cmd, shell=True, verbose=True).stdout_text.strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
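
Note: the test above validates the snapshot backing chain with "qemu-img info --backing-chain" through check_backingchain(), a helper defined elsewhere in the module. A hedged sketch of what such a check could look like: every expected path should appear in the qemu-img output, in order, topmost image first.

def check_backingchain_sketch(expected_chain, qemu_img_output):
    """Return True if the chain paths appear in order in the output."""
    pos = 0
    for path in expected_chain:
        pos = qemu_img_output.find(path, pos)
        if pos == -1:
            return False
    return True
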
コード例 #47
0
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, define a new vm for the test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
コード例 #48
0
def run(test, params, env):
    vd_formats = []
    disk_devices = []
    driver_names = []
    driver_types = []
    device_targets = []
    target_buses = []
    wwnns = []
    wwpns = []

    vm_names = params.get("vms", "avocado-vt-vm1 avocado-vt-vm2").split()
    fc_host_dir = params.get("fc_host_dir", "/sys/class/fc_host")
    vm0_disk_type = params.get("vm0_disk_type", "block")
    vm1_disk_type = params.get("vm1_disk_type", "block")
    vm0_vd_format = params.get("vm0_vd_format", "by_path")
    vm1_vd_format = params.get("vm1_vd_foramt", "by_path")
    vm0_disk_device = vm1_disk_device = params.get("disk_device", "disk")
    vm0_driver_name = vm1_driver_name = params.get("driver_name", "qemu")
    vm0_driver_type = vm1_driver_type = params.get("driver_type", "qcow2")
    vm0_device_target = vm1_device_target = params.get("device_target", "vda")
    vm0_target_bus = vm1_target_bus = params.get("target_bus", "virtio")
    vm0_wwnn = params.get("vm0_wwnn", "ENTER.WWNN.FOR.VM0")
    vm0_wwpn = params.get("vm0_wwpn", "ENTER.WWPN.FOR.VM0")
    vm1_wwnn = params.get("vm1_wwnn", "ENTER.WWNN.FOR.VM1")
    vm1_wwpn = params.get("vm1_wwpn", "ENTER.WWPN.FOR.VM1")

    disk_types = [vm0_disk_type, vm1_disk_type]
    vd_formats = [vm0_vd_format, vm1_vd_format]
    disk_devices = [vm0_disk_device, vm1_disk_device]
    driver_names = [vm0_driver_name, vm1_driver_name]
    driver_types = [vm0_driver_type, vm1_driver_type]
    device_targets = [vm0_device_target, vm1_device_target]
    target_buses = [vm0_target_bus, vm1_target_bus]
    wwnns = [vm0_wwnn, vm1_wwnn]
    wwpns = [vm0_wwpn, vm1_wwpn]
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    new_vhbas = []
    path_to_blks = []
    vmxml_backups = []
    vms = []

    try:
        online_hbas = utils_npiv.find_hbas("hba")
        if not online_hbas:
            test.cancel("There is no online hba cards.")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                           replace_existing=True)
        first_online_hba = online_hbas[0]
        if len(vm_names) != 2:
            test.cancel("This test needs exactly 2 vms.")
        for vm_index in range(len(vm_names)):
            logging.debug("prepare vm %s", vm_names[vm_index])
            vm = env.get_vm(vm_names[vm_index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[vm_index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            old_vhbas = utils_npiv.find_hbas("vhba")
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnns[vm_index],
                     "scsi_wwpn": wwpns[vm_index]})
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    timeout=_TIMEOUT*2)
            if not new_vhba:
                test.fail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_formats[vm_index] == "mpath":
                utils_misc.wait_for(
                        lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                        timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    test.fail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = os.path.join(_MPATH_DIR, new_mpath_devs[0])
            elif vd_formats[vm_index] == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(test, new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(test, new_vhba_scsibus)
                if not new_blks:
                    test.fail("blk dev not found with scsi_%s" % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                        lambda: get_symbols_by_blk(test, first_blk_dev),
                        timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(test, first_blk_dev)
                if not lun_sl:
                    test.fail("lun symbolic links not found in "
                              "/dev/disk/by-path/ for %s" %
                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BYPATH_DIR, lun_dev)
            path_to_blks.append(path_to_blk)
            img_src = vm.get_first_disk_devices()['source']
            img_info = utils_misc.get_image_info(img_src)
            src_fmt = img_info["format"]
            dest_fmt = "qcow2"
            convert_img_to_dev(test, src_fmt, dest_fmt, img_src, path_to_blk)
            disk_obj = prepare_disk_obj(disk_types[vm_index], disk_devices[vm_index],
                                        driver_names[vm_index], driver_types[vm_index],
                                        path_to_blk, device_targets[vm_index],
                                        target_buses[vm_index])
            replace_vm_first_vd(vm_names[vm_index], disk_obj)
            if vm.is_dead():
                logging.debug("Start vm %s with updated vda", vm_names[vm_index])
                vm.start()

        # concurrently create file in vm with threads
        create_file_in_vm_threads = []
        for vm in vms:
            cli_t = threading.Thread(target=create_file_in_vm,
                                     args=(vm, _VM_FILE_PATH, vm.name, _REPEAT,)
                                     )
            logging.debug("Start creating file in vm: %s", vm.name)
            create_file_in_vm_threads.append(cli_t)
            cli_t.start()
        for thrd in create_file_in_vm_threads:
            thrd.join()

        # Reboot the vm and check that the previously created file still
        # exists with the correct content
        for vm in vms:
            session = vm.wait_for_login()
            session.cmd_status_output("sync")
            if vm.is_alive():
                vm.destroy(gracefully=True)
            else:
                test.fail("%s is not running" % vm.name)
            vm.start()
            session = vm.wait_for_login()
            if check_file_in_vm(session, _VM_FILE_PATH, vm.name, _REPEAT):
                logging.debug("file exists after reboot with correct content")
            else:
                test.fail("Failed to check the test file in vm")
            session.close()
    except Exception as detail:
        test.fail("Test failed with exception: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
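
Note: convert_img_to_dev(), called above but defined elsewhere in the module, copies the guest system image onto the FC block device found under the multipath or by-path directory so the VM can boot from the LUN. A hedged sketch of that conversion step, assuming plain qemu-img convert semantics:

import subprocess

def convert_img_to_dev_sketch(src_fmt, dest_fmt, img_src, path_to_blk):
    # Copy the guest image onto the block device, converting formats.
    subprocess.run(
        ["qemu-img", "convert", "-f", src_fmt, "-O", dest_fmt,
         img_src, path_to_blk],
        check=True,
    )
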
コード例 #49
0
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "virt-tests-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "1M")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    status, output = commands.getstatusoutput(cmd)
    if status:
        raise error.TestError("Creating image file %s failed: %s" % \
                                (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name, source=image_path, target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestError("Failed to attach disk %s to VM: %s." %
                                (image_path, result.stderr))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path,
                                   resize_value, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect failure, but run successfully!")
            # No need to do more test
            return
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right "
                                     "virsh blockresize command")

        if resize_value[-1] in "bkm":
            expected_size = 1024*1024
        elif resize_value[-1] == "g":
            expected_size = 1024*1024*1024
        else:
            raise  error.TestError("Unknown infomation of unit")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info("The expected block size is %s bytes,"
                     "the actual block size is %s bytes",
                     expected_size, actual_size)

        if int(actual_size) != int(expected_size):
            raise error.TestFail("New blocksize set by blockresize is "
                                 "different from actual size from "
                                 "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
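
Note: the size check above maps the unit suffix of resize_value onto an expected byte count for the 1M test image. A short restatement of that mapping as a stand-alone helper, under the same assumptions (values ending in "b", "k" or "m" land on 1 MiB, "g" on 1 GiB):

def expected_bytes(resize_value):
    if resize_value[-1] in "bkm":
        return 1024 * 1024
    if resize_value[-1] == "g":
        return 1024 * 1024 * 1024
    raise ValueError("Unknown size unit: %s" % resize_value)

assert expected_bytes("1m") == 1048576
assert expected_bytes("1g") == 1073741824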