Example 1
    def setup(self):
        """
        Set up NFS on the host.

        Mount NFS as configured. If a local NFS server is requested, also set up
        the NFS service and exportfs.
        """
        if self.nfs_setup:
            if not self.nfs_service.status():
                logging.debug("Restart NFS service.")
                self.nfs_service.restart()

            if not os.path.isdir(self.export_dir):
                os.makedirs(self.export_dir)
            self.exportfs.export()
            self.unexportfs_in_clean = not self.exportfs.already_exported

        logging.debug("Mount %s to %s" % (self.mount_src, self.mount_dir))
        if os.path.exists(
                self.mount_dir) and not os.path.isdir(self.mount_dir):
            raise OSError(
                "Mount point %s is not a directory, check your setup." %
                self.mount_dir)

        if not os.path.isdir(self.mount_dir):
            os.makedirs(self.mount_dir)

        utils_misc.mount(self.mount_src,
                         self.mount_dir,
                         "nfs",
                         perm=self.mount_options)
Example 2
def copy_file_from_nfs(src, dst, mount_point, image_name):
    logging.info("Test failed before the install process started,"
                 " so just copy a good image from NFS for the following tests.")
    utils_misc.mount(src, mount_point, "nfs", perm="ro")
    image_src = utils_misc.get_path(mount_point, image_name)
    shutil.copy(image_src, dst)
    utils_misc.umount(src, mount_point, "nfs")
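
For comparison, here is a minimal sketch (not from the original sources) of the same copy-from-NFS pattern with a try/finally guard, so the share is always unmounted even if the copy fails. It assumes only the utils_misc.mount/umount/get_path behaviour shown in the examples above; the helper name is hypothetical.

import os
import shutil

from virttest import utils_misc


def copy_file_from_nfs_safe(src, dst, mount_point, image_name):
    # Hypothetical variant of the helper above: mount read-only, copy the
    # image, and always unmount the share afterwards.
    if not os.path.isdir(mount_point):
        os.makedirs(mount_point)
    if not utils_misc.mount(src, mount_point, "nfs", perm="ro"):
        raise OSError("Could not mount %s on %s" % (src, mount_point))
    try:
        shutil.copy(utils_misc.get_path(mount_point, image_name), dst)
    finally:
        utils_misc.umount(src, mount_point, "nfs")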
Example 3
    def test_has_run_againt_vddk7_0():
        """
        check if nbdkit --run + vddk + esx7.0 works.
        """
        from virttest.utils_pyvmomi import VSphereConnection, vim

        vm_name = params_get(params, "main_vm")
        if not vm_name:
            test.error('No VM specified')
        # vsphere server's host name or IP address
        vsphere_host = params_get(params, "vsphere_host")
        vsphere_user = params_get(params, "vsphere_user", 'root')
        # vsphere password
        vsphere_pwd = params_get(params, "vsphere_pwd")
        vsphere_passwd_file = params_get(params, "vpx_passwd_file",
                                         '/tmp/v2v_vpx_passwd')
        with open(vsphere_passwd_file, 'w') as fd:
            fd.write(vsphere_pwd)

        # get the VM's moref and its disk file name
        connect_args = {
            'host': vsphere_host,
            'user': vsphere_user,
            'pwd': vsphere_pwd
        }
        with VSphereConnection(**connect_args) as conn:
            conn.target_vm = vm_name
            nbdkit_vm_name = 'moref=' + \
                str(conn.target_vm).strip('\'').split(':')[1]
            nbdkit_file = conn.get_hardware_devices(
                dev_type=vim.vm.device.VirtualDisk)[0].backing.fileName

        # vddk_libdir
        vddk_libdir_src = params_get(params, "vddk_libdir_src")
        with tempfile.TemporaryDirectory(prefix='vddklib_') as vddk_libdir:
            utils_misc.mount(vddk_libdir_src, vddk_libdir, 'nfs')
            vddk_thumbprint = '11'
            nbdkit_cmd = """
nbdkit -rfv -U - --exportname / \
  --filter=cacheextents --filter=retry vddk server=%s user=%s password=+%s vm=%s \
  file='%s' libdir=%s --run 'qemu-img info $nbd' thumbprint=%s
""" % (vsphere_host, vsphere_user, vsphere_passwd_file, nbdkit_vm_name,
            nbdkit_file, vddk_libdir, vddk_thumbprint)
            # get thumbprint by a trick
            cmd_result = process.run(nbdkit_cmd,
                                     shell=True,
                                     ignore_status=True)
            output = cmd_result.stdout_text + cmd_result.stderr_text
            vddk_thumbprint = re.search(r'PeerThumbprint:\s+(.*)',
                                        output).group(1)

            # replace thumbprint with correct value
            nbdkit_cmd = nbdkit_cmd.strip()[:-2] + vddk_thumbprint
            logging.info('nbdkit command:\n%s', nbdkit_cmd)

            # Run the final nbdkit command
            output = process.run(nbdkit_cmd, shell=True).stdout_text
            utils_misc.umount(vddk_libdir_src, vddk_libdir, 'nfs')
            if not re.search(r'virtual size', output):
                test.fail('failed to test has_run_againt_vddk7_0')
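
The thumbprint "trick" above relies on nbdkit's VDDK plugin reporting the server's real certificate thumbprint when it is run with a wrong one. A standalone, hedged sketch of just that extraction step (the sample output string is illustrative):

import re

sample_output = "nbdkit: vddk: PeerThumbprint: AA:BB:CC:DD:EE"  # illustrative text
match = re.search(r'PeerThumbprint:\s+(.*)', sample_output)
if match:
    vddk_thumbprint = match.group(1)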
Example 4
    def setup(self):
        """
        Set up NFS on the host.

        Mount NFS as configured. If a local NFS server is requested, also set up
        the NFS service and exportfs.
        """
        if self.nfs_setup:
            if not self.nfs_service.status():
                logging.debug("Restart NFS service.")
                self.nfs_service.restart()

            if not os.path.isdir(self.export_dir):
                os.makedirs(self.export_dir)
            self.exportfs.export()
            self.unexportfs_in_clean = not self.exportfs.already_exported

        logging.debug("Mount %s to %s" % (self.mount_src, self.mount_dir))
        if os.path.exists(self.mount_dir) and not os.path.isdir(self.mount_dir):
            raise OSError(
                "Mount point %s is not a directory, check your setup." %
                self.mount_dir)

        if not os.path.isdir(self.mount_dir):
            os.makedirs(self.mount_dir)

        utils_misc.mount(self.mount_src, self.mount_dir, "nfs",
                         perm=self.mount_options)
Example 5
def copy_file_from_nfs(src, dst, mount_point, image_name):
    logging.info("Test failed before the install process started,"
                 " so just copy a good image from NFS for the following tests.")
    utils_misc.mount(src, mount_point, "nfs", perm="ro")
    image_src = utils_misc.get_path(mount_point, image_name)
    shutil.copy(image_src, dst)
    utils_misc.umount(src, mount_point, "nfs")
Example 6
def glusterfs_mount(g_uri, mount_point):
    """
    Mount gluster volume to mountpoint.

    :param g_uri: stripped gluster uri from create_gluster_uri(.., True)
    :type g_uri: str
    """
    utils_misc.mount(g_uri, mount_point, "glusterfs", None,
                     False, "fuse.glusterfs")
Example 7
def glusterfs_mount(g_uri, mount_point):
    """
    Mount gluster volume to mountpoint.

    :param g_uri: stripped gluster uri from create_gluster_uri(.., True)
    :type g_uri: str
    """
    utils_misc.mount(g_uri, mount_point, "glusterfs", None, False,
                     "fuse.glusterfs")
Example 8
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                             "glusterfs")
            utils.run("setsebool virt_use_fusefs on")
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": [host_dict]
            })
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type" %
                              dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
Example 9
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name),
                             default_pool, "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": [host_dict]})
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type"
                              % dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
Example 10
        def _compose_input_transport_options():
            """
            Set input transport options for v2v
            """
            options = ''
            if self.input_transport is None:
                return options

            if self.input_transport == 'vddk':
                if self.vddk_libdir is None or not os.path.isdir(
                        self.vddk_libdir):
                    # Invalid nfs mount source if no ':'
                    if self.vddk_libdir_src is None or ':' not in self.vddk_libdir_src:
                        logging.error(
                            'Neither vddk_libdir nor vddk_libdir_src was set')
                        raise exceptions.TestError(
                            "VDDK library directory or NFS mount point must be set"
                        )

                    mount_point = os.path.join(data_dir.get_tmp_dir(),
                                               'vddk_libdir')
                    if not os.path.exists(mount_point):
                        os.makedirs(mount_point)

                    if not utils_misc.mount(
                            self.vddk_libdir_src, mount_point, 'nfs',
                            verbose=True):
                        raise exceptions.TestError(
                            'Mount %s for %s failed' %
                            (self.vddk_libdir_src, mount_point))
                    self.vddk_libdir = mount_point
                    self.mount_record[len(
                        self.mount_record)] = (self.vddk_libdir_src,
                                               self.vddk_libdir, None)

                # Invalid vddk thumbprint if no ':'
                if self.vddk_thumbprint is None or ':' not in self.vddk_thumbprint:
                    if self.src_uri_type == 'esx':
                        tmp_host = self.esxi_host
                        tmp_passwd = self.esxi_password
                    else:
                        tmp_passwd = self.vcenter_password
                        tmp_host = self.vcenter_host
                    self.vddk_thumbprint = get_vddk_thumbprint(
                        tmp_host, tmp_passwd, self.src_uri_type)

            # New input_transport type can be added here

            # A dict mapping input_transport types to their io options; new
            # types should be added here. The io values are composed at
            # runtime from the user's input.
            input_transport_args = {
                'vddk':
                "-io vddk-libdir=%s -io vddk-thumbprint=%s" %
                (self.vddk_libdir, self.vddk_thumbprint)
            }

            options = " -it %s " % (self.input_transport)
            options += input_transport_args[self.input_transport]
            return options
Example 11
    def mount_hugepages(page_size):
        """
        Mount hugepages.

        :param page_size: page size in kB, e.g. 4, 2048 or 1048576
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)
Example 12
def prepare_gluster_disk(blk_source, test, **kwargs):
    """
    Set up gluster disk device and replace the domain disk image

    :param blk_source: The domain disk image path
    :param test: Avocado test object
    :param kwargs: Keyword arguments for gluster device setup
    :return: host_ip
    """
    vol_name = kwargs.get("vol_name")
    brick_path = kwargs.get("brick_path")
    disk_img = kwargs.get("disk_img")
    disk_format = kwargs.get("disk_format")
    host_ip = gluster.setup_or_cleanup_gluster(True, **kwargs)
    logging.debug("host ip: %s ", host_ip)
    # Copy the domain disk image to gluster disk path
    image_info = utils_misc.get_image_info(blk_source)
    dest_image = "/mnt/%s" % disk_img
    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (blk_source, dest_image))
    else:
        disk_cmd = (
            "qemu-img convert -f %s -O %s %s %s" %
            (image_info["format"], disk_format, blk_source, dest_image))
    # Mount the gluster disk and create the image
    src_mnt = "%s:%s" % (host_ip, vol_name)
    if not utils_misc.mount(src_mnt, "/mnt", "glusterfs"):
        test.error("glusterfs mount failed")
    process.run("%s && chmod a+rw /mnt/%s && umount /mnt" %
                (disk_cmd, disk_img),
                shell=True)
    return host_ip
Example 13
def prepare_gluster_disk(blk_source, test, **kwargs):
    """
    Set up gluster disk device and replace the domain disk image

    :param blk_source: The domain disk image path
    :param test: Avocado test object
    :param kwargs: Keyword arguments for gluster device setup
    :return: host_ip
    """
    vol_name = kwargs.get("vol_name")
    brick_path = kwargs.get("brick_path")
    disk_img = kwargs.get("disk_img")
    disk_format = kwargs.get("disk_format")
    host_ip = utlv.setup_or_cleanup_gluster(True, vol_name, brick_path)
    logging.debug("host ip: %s ", host_ip)
    # Copy the domain disk image to gluster disk path
    image_info = utils_misc.get_image_info(blk_source)
    dest_image = "/mnt/%s" % disk_img
    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (blk_source, dest_image))
    else:
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                    (image_info["format"], disk_format,
                     blk_source, dest_image))
    # Mount the gluster disk and create the image
    src_mnt = "%s:%s" % (host_ip, vol_name)
    if not utils_misc.mount(src_mnt, "/mnt", "glusterfs"):
        test.error("glusterfs mount failed")
    process.run("%s && chmod a+rw /mnt/%s && umount /mnt" %
                (disk_cmd, disk_img), shell=True)
    return host_ip
Example 14
    def mount_hugepages(page_size):
        """
        Mount hugepages.

        :param page_size: page size in kB, e.g. 4, 2048 or 1048576
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)
Example 15
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             test.cancel("Cannot get iscsi device on this"
                         " host:%s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
Example 16
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             test.cancel("Cannot get iscsi device on this"
                         " host:%s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
Example 17
 def mount(self):
     """
     Mount source into given mount point.
     """
     return utils_misc.mount(self.mount_src,
                             self.mount_dir,
                             "nfs",
                             perm=self.mount_options)
Example 18
 def build_disk_xml(disk_img, disk_format, host_ip):
     """
     Try to rebuild disk xml
     """
     if default_pool:
         disk_xml = Disk(type_name="file")
     else:
         disk_xml = Disk(type_name="network")
     disk_xml.device = "disk"
     driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
     if driver_iothread:
         driver_dict.update({"iothread": driver_iothread})
     disk_xml.driver = driver_dict
     disk_xml.target = {"dev": "vdb", "bus": "virtio"}
     if default_pool:
         utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                          "glusterfs")
         process.run("setsebool virt_use_fusefs on", shell=True)
         source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict})
     else:
         source_dict = {
             "protocol": "gluster",
             "name": "%s/%s" % (vol_name, disk_img)
         }
         host_dict = [{"name": host_ip, "port": "24007"}]
          # If multiple_hosts is True, attempt to add multiple hosts.
         if multiple_hosts:
             host_dict.append({
                 "name": params.get("dummy_host1"),
                 "port": "24007"
             })
             host_dict.append({
                 "name": params.get("dummy_host2"),
                 "port": "24007"
             })
         if transport:
             host_dict[0]['transport'] = transport
         disk_xml.source = disk_xml.new_disk_source(**{
             "attrs": source_dict,
             "hosts": host_dict
         })
     return disk_xml
Example 19
def run_image_copy(test, params, env):
    """
    Copy guest images from nfs server.
    1) Mount the NFS share directory
    2) Check the existence of source image
    3) If it exists, copy the image from NFS

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    if vm is not None:
        vm.destroy()

    src = params.get('images_good')
    asset_name = '%s' % (os.path.split(params['image_name'])[1])
    image = '%s.%s' % (params['image_name'], params['image_format'])
    dst_path = '%s/%s' % (virttest.data_dir.get_data_dir(), image)
    image_dir = os.path.dirname(dst_path)
    if params.get("rename_error_image", "no") == "yes":
        error_image = os.path.basename(params['image_name']) + "-error"
        error_image += '.' + params['image_format']
        error_dst_path = os.path.join(image_dir, error_image)
        mv_cmd = "/bin/mv %s %s" % (dst_path, error_dst_path)
        utils.system(mv_cmd, timeout=360, ignore_status=True)

    if src:
        mount_dest_dir = params.get('dst_dir', '/mnt/images')
        if not os.path.exists(mount_dest_dir):
            try:
                os.makedirs(mount_dest_dir)
            except OSError as err:
                logging.warning('mkdir %s error:\n%s', mount_dest_dir, err)

        if not os.path.exists(mount_dest_dir):
            raise error.TestError('Failed to create NFS share dir %s' %
                                  mount_dest_dir)

        error.context("Mount the NFS share directory")
        if not utils_misc.mount(src, mount_dest_dir, 'nfs', 'ro'):
            raise error.TestError('Could not mount NFS share %s to %s' %
                                  (src, mount_dest_dir))

        error.context("Check the existence of source image")
        src_path = '%s/%s.%s' % (mount_dest_dir, asset_name,
                                 params['image_format'])
        asset_info = virttest.asset.get_file_asset(asset_name, src_path,
                                                   dst_path)
        if asset_info is None:
            raise error.TestError('Could not find %s' % image)
Example 20
def run_image_copy(test, params, env):
    """
    Copy guest images from nfs server.
    1) Mount the NFS share directory
    2) Check the existence of source image
    3) If it exists, copy the image from NFS

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    if vm is not None:
        vm.destroy()

    src = params.get('images_good')
    asset_name = '%s' % (os.path.split(params['image_name'])[1])
    image = '%s.%s' % (params['image_name'], params['image_format'])
    dst_path = storage.get_image_filename(params, data_dir.get_data_dir())
    image_dir = os.path.dirname(dst_path)
    if params.get("rename_error_image", "no") == "yes":
        error_image = os.path.basename(params['image_name']) + "-error"
        error_image += '.' + params['image_format']
        error_dst_path = os.path.join(image_dir, error_image)
        mv_cmd = "/bin/mv %s %s" % (dst_path, error_dst_path)
        utils.system(mv_cmd, timeout=360, ignore_status=True)

    if src:
        mount_dest_dir = params.get('dst_dir', '/mnt/images')
        if not os.path.exists(mount_dest_dir):
            try:
                os.makedirs(mount_dest_dir)
            except OSError as err:
                logging.warning('mkdir %s error:\n%s', mount_dest_dir, err)

        if not os.path.exists(mount_dest_dir):
            raise error.TestError('Failed to create NFS share dir %s' %
                                  mount_dest_dir)

        error.context("Mount the NFS share directory")
        if not utils_misc.mount(src, mount_dest_dir, 'nfs', 'ro'):
            raise error.TestError('Could not mount NFS share %s to %s' %
                                  (src, mount_dest_dir))

        error.context("Check the existence of source image")
        src_path = '%s/%s.%s' % (mount_dest_dir, asset_name,
                                 params['image_format'])
        asset_info = virttest.asset.get_file_asset(asset_name, src_path,
                                                   dst_path)
        if asset_info is None:
            raise error.TestError('Could not find %s' % image)
Example 21
 def build_disk_xml(disk_img, disk_format, host_ip):
     """
     Try to rebuild disk xml
     """
     if default_pool:
         disk_xml = Disk(type_name="file")
     else:
         disk_xml = Disk(type_name="network")
     disk_xml.device = "disk"
     driver_dict = {"name": "qemu",
                    "type": disk_format,
                    "cache": "none"}
     if driver_iothread:
         driver_dict.update({"iothread": driver_iothread})
     disk_xml.driver = driver_dict
     disk_xml.target = {"dev": "vdb", "bus": "virtio"}
     if default_pool:
         utils_misc.mount("%s:%s" % (host_ip, vol_name),
                          default_pool, "glusterfs")
         process.run("setsebool virt_use_fusefs on", shell=True)
         source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict})
     else:
         source_dict = {"protocol": "gluster",
                        "name": "%s/%s" % (vol_name, disk_img)}
         host_dict = [{"name": host_ip, "port": "24007"}]
          # If multiple_hosts is True, attempt to add multiple hosts.
         if multiple_hosts:
             host_dict.append({"name": params.get("dummy_host1"), "port": "24007"})
             host_dict.append({"name": params.get("dummy_host2"), "port": "24007"})
         if transport:
             host_dict[0]['transport'] = transport
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict, "hosts": host_dict})
     return disk_xml
Example 22
def v2v_mount(src, dst='v2v_mount_point', fstype='nfs'):
    """
    Mount nfs src to dst

    :param src: NFS source
    :param dst: NFS mount point
    """
    mount_point = os.path.join(data_dir.get_tmp_dir(), dst)
    if not os.path.exists(mount_point):
        os.makedirs(mount_point)

    if not utils_misc.mount(src, mount_point, fstype, verbose=True):
        raise exceptions.TestError('Mount %s for %s failed' %
                                   (src, mount_point))

    return mount_point
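
A hedged usage sketch for v2v_mount(): the NFS source below is a placeholder, it relies on the imports used by the helper above, and cleanup goes through utils_misc.umount() as in the other examples.

from virttest import utils_misc

src = "nfs.example.com:/export/vddk_libdir"  # hypothetical NFS export
mount_point = v2v_mount(src, dst='vddk_libdir')
try:
    pass  # consume the files under mount_point here
finally:
    utils_misc.umount(src, mount_point, 'nfs')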
Example 23
    @param env: Dictionary with test environment.
    """
    mount_dest_dir = params.get('dst_dir', '/mnt/images')
    if not os.path.exists(mount_dest_dir):
        try:
            os.makedirs(mount_dest_dir)
        except OSError as err:
            logging.warning('mkdir %s error:\n%s', mount_dest_dir, err)

    if not os.path.exists(mount_dest_dir):
        raise error.TestError('Failed to create NFS share dir %s' %
                              mount_dest_dir)

    src = params.get('images_good')
    image = '%s.%s' % (os.path.split(params['image_name'])[1],
                       params['image_format'])
    src_path = os.path.join(mount_dest_dir, image)
    dst_path = '%s.%s' % (params['image_name'], params['image_format'])
    cmd = 'cp %s %s' % (src_path, dst_path)

    if not utils_misc.mount(src, mount_dest_dir, 'nfs', 'ro'):
        raise error.TestError('Could not mount NFS share %s to %s' %
                              (src, mount_dest_dir))

    # Check the existence of source image
    if not os.path.exists(src_path):
        raise error.TestError('Could not find %s in NFS share' % src_path)

    logging.debug('Copying image %s...', image)
    utils.system(cmd)
Example 24
def run(test, params, env):
    """
    Test hpt resizing
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = eval(params.get('error_msg', '[]'))

    hpt_attrs = eval(params.get('hpt_attrs', '{}'))
    hpt_order_path = params.get('hpt_order_path', '')
    cpu_attrs = eval(params.get('cpu_attrs', '{}'))
    numa_cell = eval(params.get('numa_cell', '{}'))
    hugepage = 'yes' == params.get('hugepage', 'no')
    maxpagesize = int(params.get('maxpagesize', 0))
    check_hp = 'yes' == params.get('check_hp', 'no')
    qemu_check = params.get('qemu_check', '')
    skip_p8 = 'yes' == params.get('skip_p8', 'no')

    def set_hpt(vmxml, sync, **attrs):
        """
        Set resizing value to vm xml

        :param vmxml: xml of vm to be manipulated
        :param sync: whether to sync vmxml after
        :param attrs: attrs to set to hpt xml
        """
        if vmxml.xmltreefile.find('/features'):
            features_xml = vmxml.features
        else:
            features_xml = vm_xml.VMFeaturesXML()
        hpt_xml = vm_xml.VMFeaturesHptXML()
        for attr in attrs:
            setattr(hpt_xml, attr, attrs[attr])
        features_xml.hpt = hpt_xml
        vmxml.features = features_xml
        logging.debug(vmxml)
        if sync:
            vmxml.sync()

    def set_cpu(vmxml, **attrs):
        """
        Set cpu attrs for vmxml according to given attrs

        :param vmxml: xml of vm to be manipulated
        :param attrs: attrs to set to cpu xml
        """
        if vmxml.xmltreefile.find('cpu'):
            cpu = vmxml.cpu
        else:
            cpu = vm_xml.VMCPUXML()
        if 'numa_cell' in attrs:
            cpu.xmltreefile.create_by_xpath('/numa')
            cpu.numa_cell = attrs['numa_cell']
        for key in attrs:
            setattr(cpu, key, attrs[key])
        vmxml.cpu = cpu
        vmxml.sync()

    def set_memory(vmxml):
        """
        Set memory attributes in vm xml
        """
        vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848))
        vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16))
        vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB')

        logging.debug(numa_cell)
        if numa_cell:
            # Remove cpu topology to avoid a mismatch with the vcpu count
            if vmxml.get_cpu_topology():
                new_cpu = vmxml.cpu
                new_cpu.del_topology()
                vmxml.cpu = new_cpu
            vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1
        vmxml.sync()

    def check_hpt_order(session, resizing=''):
        """
        Return the hpt order from the hpt_order file by default.
        If 'resizing' is disabled, also test updating hpt_order.
        """
        if not hpt_order_path:
            test.cancel('No hpt order path provided.')
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        if resizing == 'disabled':
            cmd_result = session.cmd_status_output(
                'echo %d > %s' % (hpt_order + 1, hpt_order_path))
            result = process.CmdResult(stderr=cmd_result[1],
                                       exit_status=cmd_result[0])
            libvirt.check_exit_status(result, True)
            libvirt.check_result(result, error_msg)
        return hpt_order

    def check_hp_in_vm(session, page_size):
        """
        Check if hugepage size is correct inside vm

        :param session: the session of the running vm
        :param page_size: the expected pagesize to be checked inside vm
        """
        expect = False if int(page_size) == 65536 else True
        meminfo = session.cmd_output('cat /proc/meminfo|grep Huge')
        logging.info('meminfo: \n%s', meminfo)
        pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024)
        logging.info('"%s" should %s be found in meminfo output', pattern,
                     '' if expect else 'not')
        result = expect == bool(re.search(pattern, meminfo))
        if not result:
            test.fail('meminfo output not meet expectation')

        # Check PAGE_SIZE in another way
        if not expect:
            conf_page_size = session.cmd_output('getconf PAGE_SIZE')
            logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size)
            if int(conf_page_size) != int(page_size):
                test.fail(
                    'PAGE_SIZE not correct, should be %r, actually is %r' %
                    (page_size, conf_page_size))

    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        arch = platform.machine()
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        resizing = hpt_attrs.get('resizing')

        # Test on ppc64le hosts
        if arch.lower() == 'ppc64le':
            cpu_arch = cpu.get_cpu_arch()
            logging.debug('cpu_arch is: %s', cpu_arch)
            if skip_p8 and cpu_arch == 'power8':
                test.cancel('This case is not for POWER8')
            if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0):
                test.cancel('Qemu version is too low, '
                            'does not support maxpagesize setting')
            if maxpagesize == 16384 and cpu_arch == 'power9':
                test.cancel('Power9 does not support 16M pagesize.')

            set_hpt(vmxml, True, **hpt_attrs)
            if cpu_attrs or numa_cell:
                if numa_cell:
                    cpu_attrs['numa_cell'] = numa_cell
                set_cpu(vmxml, **cpu_attrs)
            if hugepage:
                vm_mem = vmxml.max_mem
                host_hp_size = utils_memory.get_huge_page_size()

                # Reserve 100M of extra memory just to be safe
                hp_count = max((vm_mem + 102400) // host_hp_size, 1200)
                vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True)

                # Set up hugepage env
                mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs'
                if not os.path.isdir(hp_path):
                    process.run('mkdir %s' % hp_path, verbose=True)
                utils_memory.set_num_huge_pages(hp_count)
                if utils_misc.is_mounted(mnt_source,
                                         hp_path,
                                         fstype,
                                         verbose=True):
                    utils_misc.umount(mnt_source,
                                      hp_path,
                                      fstype,
                                      verbose=True)
                utils_misc.mount(mnt_source, hp_path, fstype, verbose=True)

                # Restart libvirtd service to make sure the mounted hugepages
                # are recognized
                utils_libvirtd.libvirtd_restart()

            if resizing == 'enabled':
                set_memory(vmxml)
            logging.debug('vmxml: \n%s', vmxml)

            # Start vm and check if start succeeds
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result, expect_error=status_error)

            # if the vm is not supposed to start, terminate the test
            if status_error:
                libvirt.check_result(result, error_msg)
                return

            libvirt.check_qemu_cmd_line(qemu_check)
            session = vm.wait_for_login()
            hpt_order = check_hpt_order(session, resizing)

            # Check hugepage inside vm
            if check_hp:
                check_hp_in_vm(session, maxpagesize * 1024)

            if resizing == 'enabled':
                mem_xml = utils_hotplug.create_mem_xml(
                    tg_size=int(params.get('mem_size', 2048000)),
                    tg_sizeunit=params.get('size_unit', 'KiB'),
                    tg_node=int(params.get('mem_node', 0)),
                    mem_model=params.get('mem_model', 'dimm'))
                logging.debug(mem_xml)

                # Attach a memory device to the guest 12 times,
                # which will reach the maximum memory limitation
                for i in range(12):
                    virsh.attach_device(vm_name,
                                        mem_xml.xml,
                                        debug=True,
                                        ignore_status=False)
                xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name)
                logging.debug(xml_after_attach)

                # Check dumpxml of the guest,
                # check if each device has its alias
                for i in range(12):
                    pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i
                    logging.debug('Searching for %s', pattern)
                    if not re.search(pattern, str(
                            xml_after_attach.xmltreefile)):
                        test.fail('Missing memory alias: %s' % pattern)

        # Test on non-ppc64le hosts
        else:
            set_hpt(vmxml, sync=False, **hpt_attrs)
            result = virsh.define(vmxml.xml)
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, error_msg)

    finally:
        bk_xml.sync()
        if hugepage:
            utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs')
            utils_memory.set_num_huge_pages(0)
Example 25
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Start the VM and check whether it started successfully
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting a VM with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    raise error.TestFail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                raise error.TestFail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # the unixbench test needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    raise error.TestNAError("Failed to run unixbench in guest,"
                                            " please make sure some necessary"
                                            " packages are installed in guest,"
                                            " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")
Example 26
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Start the VM and check whether it started successfully
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    if test_type == "contrast":
        range = float(params.get("mem_error_range", 1.25))
    elif test_type == "stress":
        target_path = params.get("target_path", "/tmp/test.out")
    elif test_type == "unixbench":
        unixbench_control_file = params.get("unixbench_controle_file",
                                            "unixbench5.control")

    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("Hugepage Stress Test need two VM(s).")
        # confirm no VM(s) running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VM(s) is living.")
    else:
        vm_names.append(params.get("main_vm"))

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    for vm_name in vm_names:
        # try to start vm and login
        try:
            vm = env.get_vm(vm_name)
            vm.start()
        except VMError as e:
            if mb_enable and not tlbfs_enable:
                # if hugetlbfs is not mounted, starting a VM with the
                # memoryBacking tag will fail
                logging.debug(e)
                pass  # the is_alive() check below breaks out of the loop
            else:
                error_msg = "Test failed in positive case. error: %s\n" % e
                raise error.TestFail(error_msg)
        if vm.is_alive() is not True:
            break
        vms.append(vm)

        # try to login and run some program
        try:
            session = vm.wait_for_login()
        except (LoginError, ShellError) as e:
            error_msg = "Test failed in positive case.\n error: %s\n" % e
            raise error.TestFail(error_msg)
Example 27
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Start the VM and check whether it started successfully
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    if test_type == "contrast":
        range = float(params.get("mem_error_range", 1.25))
    elif test_type == "stress":
        target_path = params.get("target_path", "/tmp/test.out")
    elif test_type == "unixbench":
        unixbench_control_file = params.get("unixbench_controle_file",
                                            "unixbench5.control")

    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("Hugepage Stress Test need two VM(s).")
        # confirm no VM(s) running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VM(s) is living.")
    else:
        vm_names.append(params.get("main_vm"))

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    for vm_name in vm_names:
        # try to start vm and login
        try:
            vm = env.get_vm(vm_name)
            vm.start()
        except VMError as e:
            if mb_enable and not tlbfs_enable:
                # if hugetlbfs is not mounted, starting a VM with the
                # memoryBacking tag will fail
                logging.debug(e)
                pass  # the is_alive() check below breaks out of the loop
            else:
                error_msg = "Test failed in positive case. error: %s\n" % e
                raise error.TestFail(error_msg)
        if vm.is_alive() is not True:
            break
        vms.append(vm)

        # try to login and run some program
        try:
            session = vm.wait_for_login()
        except (LoginError, ShellError) as e:
            error_msg = "Test failed in positive case.\n error: %s\n" % e
            raise error.TestFail(error_msg)
Example 28
def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        test.cancel("Host only has one NUMA node, skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = params.get_numeric("tmpfs_size")
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size == 0:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = get_tmpfs_write_speed()
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)
    utils_memory.drop_caches()

    if utils_memory.freememtotal() < tmpfs_size:
        test.cancel("Host does not have enough free memory to run the test, "
                    "skipping test...")

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    test_mem = float(params.get("mem")) * float(params.get("mem_ratio", 0.8))
    stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if os.path.exists(memory_file):
            os.remove(memory_file)
        utils_memory.drop_caches()
        if utils_memory.freememtotal() < tmpfs_size:
            test.error("Don't have enough memory to execute this "
                       "test after %s round" % test_round)
        error_context.context("Executing stress test round: %s" % test_round,
                              logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error_context.context(
            "Try to allocate memory in node %s" % numa_node_malloc,
            logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            process.system(numa_dd_cmd, timeout=dd_timeout, shell=True)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                test.fail("Can not allocate memory in node %s."
                          " Error message:%s" %
                          (numa_node_malloc, str(error_msg)))
        error_context.context("Run memory heavy stress in guest", logging.info)
        stress_test = utils_test.VMStress(vm,
                                          "stress",
                                          params,
                                          stress_args=stress_args)
        stress_test.load_stress_tool()
        error_context.context("Get the qemu process memory use status",
                              logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            test.fail("Memory is still stuck in node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        stress_test.unload_stress()
        stress_test.clean()
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()

    session.close()
Esempio n. 29
0
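# prepare_c_file() is used by the "stress" branch of run() below but is not
# part of this snippet.  The definition that follows is a minimal,
# hypothetical sketch: it writes a small C program that allocates and touches
# memory so the guest keeps demanding (huge)pages.  The helper body and the
# file name are assumptions and may differ from the real implementation.
def prepare_c_file():
    c_code = """
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    size_t chunk = 64 * 1024 * 1024;     /* 64 MiB per allocation */
    int i;

    for (i = 0; i < 16; i++) {           /* touch about 1 GiB in total */
        char *buf = malloc(chunk);
        if (!buf)
            break;
        memset(buf, 1, chunk);           /* fault the pages in */
    }
    pause();                             /* keep the mappings alive */
    return 0;
}
"""
    stress_path = "/tmp/memory_stress.c"  # hypothetical path; scp'd to /tmp/
    with open(stress_path, "w") as c_file:
        c_file.write(c_code)
    return stress_path
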
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Check the environment.
    3) Start the VM and check whether it has been started successfully.
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            test.cancel("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            test.cancel("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting a VM with the
                    # memoryBacking tag is expected to fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    test.fail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                test.fail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare a C file used to increase memory stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # compile and start the workload in the background
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # unixbench test needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest to start running in the VM
                time.sleep(delay)

                def _is_unixbench_running():
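                    # UnixBench's top-level "Run" driver is a perl script, so
                    # a surviving "perl ... Run" process means the benchmark
                    # is still going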
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    test.cancel("Failed to run unixbench in guest,"
                                " please make sure some necessary"
                                " packages are installed in guest,"
                                " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")

        if test_type == "contrast":
            # wait for the VMs to finish starting completely
            time.sleep(delay)

        if not (mb_enable and not tlbfs_enable):
            logging.debug("Start analyzing the hugepage usage...")
            pid = vms[-1].get_pid()
            started_free = utils_memory.get_num_huge_pages_free()
            # Get the thp usage from /proc/pid/smaps
            started_anon = utils_memory.get_num_anon_huge_pages(pid)
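            # static hugepage consumption is the drop in HugePages_Free since
            # before the guests started; multiplying by the hugepage size
            # (KiB, from /proc/meminfo) gives the amount of guest memory
            # backed by static hugepages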
            static_used = non_started_free - started_free
            hugepage_used = static_used * page_size

            if test_type == "contrast":
                # get qemu-kvm memory consumption by top
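                # with the default field layout, column 10 of top's batch
                # output is %MEM; converting that percentage of total host
                # memory (KiB) gives a figure comparable with hugepage_used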
                cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid
                rate = process.run(cmd, ignore_status=False,
                                   verbose=True, shell=True).stdout_text.strip()
                qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100
                logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d",
                              rate, qemu_kvm_used, hugepage_used)
                if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1):
                    test.fail("Error for hugepage usage")
            if test_type == "stress":
                if non_started_free <= started_free:
                    logging.debug("hugepage usage:%d -> %d", non_started_free,
                                  started_free)
                    test.fail("Error for hugepage usage with stress")
            if mb_enable is not True:
                if static_used > 0:
                    test.fail("VM use static hugepage without"
                              " memoryBacking element")
                if thp_enable is not True and started_anon > 0:
                    test.fail("VM use transparent hugepage, while"
                              " it's disabled")
            else:
                if tlbfs_enable is not True:
                    if static_used > 0:
                        test.fail("VM use static hugepage without tlbfs"
                                  " mounted")
                    if thp_enable and started_anon <= 0:
                        test.fail("VM doesn't use transparent"
                                  " hugepage")
                else:
                    if shp_num > 0:
                        if static_used <= 0:
                            test.fail("VM doesn't use static"
                                      " hugepage")
                    else:
                        if static_used > 0:
                            test.fail("VM use static hugepage,"
                                      " while it's set to zero")
                    if thp_enable is not True:
                        if started_anon > 0:
                            test.fail("VM use transparent hugepage,"
                                      " while it's disabled")
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            test.fail("VM doesn't use transparent"
                                      " hugepage, while static"
                                      " hugepage is disabled")
    finally:
        # close guest sessions
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
Esempio n. 30
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
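        # the temporary target path looks like
        # <mnt_point>/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>;
        # assuming a two-component mount point (e.g. /mnt/export), splitting
        # on '/' below puts the three uuids at indexes 3, 5 and 6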
        if len(tmp_target) < 1:
            test.error("Failed to find tmp target file name when converting"
                       " vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s" %
                                        (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file",
                         expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Message not found: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
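            # a UUID has 32 hex digits, so 32 or more '0' characters means
            # the all-zero placeholder id was written instead of a real
            # snapshot reference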
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists only of zeros')
        else:
            test.fail('Failed to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
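        # --print-source output is a block of "key: value" lines; continuation
        # lines start with a tab and are folded back into the previous key
        # before building the source_info dict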
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i-1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = ['nr vCPUs',  'hypervisor type', 'source name', 'memory',
                     'display', 'CPU features', 'disks', 'NICs']
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        # Check single values
        fail = []
        xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'
        check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key], check_map[key])
            if source_info[key] != str(check_map[key]):
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all().values())[0]
        bus, type = disk.find('target').get('bus'), disk.find('driver').get('type')
        path = disk.find('source').get('file')
        disks_info = "%s (%s) [%s]" % (path, type, bus)
        source_disks = source_info['disks'].split()
        source_disks_path = source_disks[0]
        source_disks_type = source_disks[1].strip('()')
        source_disks_bus = source_disks[2].strip('[]')
        logging.info('disks:%s<->%s', source_info['disks'], disks_info)
        if source_disks_path != path or source_disks_type != type or bus not in source_disks_bus:
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        feature_list = xml.features.get_feature_list()
        logging.info('CPU features:%s<->%s', source_info['CPU features'], feature_list)
        if sorted(source_info['CPU features'].split(',')) != sorted(feature_list):
            fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v', verbose=False).stdout.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        test.fail('Error log longer than 72 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass
                if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'vmx':
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if 'VMF' not in output:
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img, ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" succeeded' % (command % win_img))
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Did not find log about saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in result.stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(result.stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    expect_output = open(params['example_file']).read().strip()
                    logging.debug(expect_output)
                    if expect_output != result.stdout.strip():
                        test.fail('machine readable content not correct')
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)
                disk_check = process.run('qemu-img check %s' % img_path).stdout
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'without_ic':
                input_option = '-i %s %s' % (input_mode, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in ['length_of_error', 'empty_nic_source_network',
                                'empty_nic_source_bridge']:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(), os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s not exists', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Setting up password file for non-interactive ESX access.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Work around until the bug is fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            create_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool')
            create_pool(user_pool=True)
            logging.debug(virsh.pool_list(uri='qemu:///session'))
            sh_install_vm = params.get('sh_install_vm')
            if not sh_install_vm:
                test.error('Script for installing the source vm is missing')
            with open(sh_install_vm) as fh:
                cmd_install_vm = fh.read().strip()
            process.run('su - %s -c "%s"' % (v2v_user, cmd_install_vm),
                        timeout=10, shell=True)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(), 'tail_log')
            params['tail'] = aexpect.Tail(
                    command='tail -f /var/log/messages',
                    output_func=utils_misc.log_line,
                    output_params=(params['tail_log'],)
            )
        cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool')
                    cleanup_pool(user_pool=True)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
Esempio n. 31
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    restore_image_owner = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool():
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session":
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool():
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session":
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\sconvert\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            raise exceptions.TestError("Fail to find tmp target file name when"
                                       " converting vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s" %
                                        (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file",
                         expected_vmtype)
        else:
            raise exceptions.TestFail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            raise exceptions.TestError("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a preallocated image"
                                              % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                raise exceptions.TestFail("%s format is not %s"
                                          % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise exceptions.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            raise exceptions.TestFail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            raise exceptions.TestFail("Not find message: %s" % init_msg)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        raise exceptions.TestFail('Error log longer than 72 '
                                                  'characters: %s' % line)
            else:
                error_map = {
                    'conflict_options': ['option used more than once'],
                    'xen_no_output_format': ['The input metadata did not define'
                                             ' the disk format']
                }
                if not utils_v2v.check_log(output, error_map[checkpoint]):
                    raise exceptions.TestFail('Not found error message %s' %
                                              error_map[checkpoint])
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "allocation", expected_mode)
            if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise exceptions.TestFail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    raise exceptions.TestFail('Output is not empty in quiet mode')

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode in ['libvirtxml', 'ova']:
            raise exceptions.TestNAError("Unsupported input mode: %s" % input_mode)
        else:
            raise exceptions.TestError("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
        output_format = params.get("output_format")
        if output_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise exceptions.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.mkdir(vdsm_domain_dir)
                os.mkdir(vdsm_image_dir)
                os.mkdir(vdsm_vm_dir)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint == 'length_of_error':
            pass
        else:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Change the image owner and group
                ori_owner = os.stat(disk_img).st_uid
                ori_group = os.stat(disk_img).st_gid
                os.chown(disk_img, user_info.pw_uid, user_info.pw_gid)
                restore_image_owner = True
            else:
                raise exceptions.TestNAError("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format, see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd
        cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                process.system(cmd)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if restore_image_owner:
            os.chown(disk_img, ori_owner, ori_group)
        if backup_xml:
            backup_xml.sync()
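
The cleanup path above hinges on the utils_misc.mount/umount pair for the NFS export domain. A minimal standalone sketch of that pairing, assuming the virttest.utils_misc module used throughout these examples and with placeholder paths rather than values from the test:

# Editor's sketch of the NFS mount/umount pairing used above; the export
# path and mount point below are placeholders, not values from the test.
import os

from virttest import utils_misc

nfs_storage = "nfs.example.com:/export/v2v"
mnt_point = "/tmp/v2v_nfs_mnt"

if not os.path.isdir(mnt_point):
    os.mkdir(mnt_point)
if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
    raise RuntimeError("Mount NFS failed")
try:
    pass  # run the conversion against the mounted export domain here
finally:
    utils_misc.umount(nfs_storage, mnt_point, "nfs")
    os.rmdir(mnt_point)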
Esempio n. 32
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            raise exceptions.TestError("Fail to find tmp target file name when"
                                       " converting vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])
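    # Editor's note (illustrative, not part of the original test): with a
    # mount point such as /tmp/v2v_mount, the matched target path looks like
    #   /tmp/v2v_mount/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>
    # so components 3, 5 and 6 of the '/'-split path are the three UUIDs
    # returned above.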

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s" %
                                        (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path
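    # Editor's note (illustrative): depending on output_mode, the converted
    # image therefore ends up as either a libvirt volume path, a plain
    # "<vm_name>-sda" file under output_storage, or a volume inside the
    # export domain's images/<image_uuid>/ directory on the NFS mount.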

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            raise exceptions.TestFail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            raise exceptions.TestError("Image path: '%s' is invalid" %
                                       img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a sparse image" %
                                              img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    raise exceptions.TestFail(
                        "%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                raise exceptions.TestFail("%s format is not %s" %
                                          (img_path, expected_value))
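    # Editor's note (illustrative, values are placeholders): check_image
    # relies on utils_misc.get_image_info() returning a dict with at least
    # 'format', 'vsize' (virtual size) and 'dsize' (allocated size), e.g.
    #   {'format': 'qcow2', 'vsize': 10737418240, 'dsize': 1572864}
    # so "sparse" simply means the allocated size is smaller than the
    # virtual size.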

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise exceptions.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            raise exceptions.TestFail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            raise exceptions.TestFail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
            if snapshot_id.count('0') >= 32:
                raise exceptions.TestFail('vm_snapshot_id consists of all "0"s')
        else:
            raise exceptions.TestFail('Failed to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'display',
            'CPU features', 'disks', 'NICs'
        ]
        for key in checklist:
            if key not in source_info:
                raise exceptions.TestFail('%s info missing' % key)

        # Check single values
        fail = []
        xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'
        check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if source_info[key] != str(check_map[key]):
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all().values())[0]
        bus, type = disk.find('target').get('bus'), disk.find('driver').get(
            'type')
        path = disk.find('source').get('file')
        disks_info = "%s (%s) [%s]" % (path, type, bus)
        logging.info('disks:%s<->%s', source_info['disks'], disks_info)
        if source_info['disks'] != disks_info:
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if source_info['NICs'].lower() != nic_info.lower():
            fail.append('NICs')

        # Check cpu features
        feature_list = xml.features.get_feature_list()
        logging.info('CPU features:%s<->%s', source_info['CPU features'],
                     feature_list)
        if sorted(source_info['CPU features'].split(',')) != sorted(
                feature_list):
            fail.append('CPU features')

        if fail:
            raise exceptions.TestFail('Source info not correct for: %s' % fail)
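    # Editor's sketch of the '--print-source' text this parser expects
    # (all values below are placeholders):
    #   source name: guest-example
    #   hypervisor type: kvm
    #   nr vCPUs: 2
    #   memory: 2147483648 (bytes)
    #   CPU features: apic,acpi,pae
    #   display: vnc
    #   disks: /var/lib/libvirt/images/guest.qcow2 (qcow2) [virtio]
    #   NICs: network "default" mac: 52:54:00:aa:bb:cc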

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        raise exceptions.TestFail(
                            'Error log longer than 72 '
                            'characters: %s' % line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    raise exceptions.TestFail('Disk exists for vm %s' %
                                              vm_name)
            else:
                error_map = {
                    'conflict_options': ['option used more than once'],
                    'xen_no_output_format':
                    ['The input metadata did not define'
                     ' the disk format'],
                    'in_place':
                    ['virt-v2v: error: --in-place cannot be used in RHEL 7']
                }
                if checkpoint in error_map and not utils_v2v.check_log(
                        output, error_map[checkpoint]):
                    raise exceptions.TestFail('Not found error message %s' %
                                              error_map[checkpoint])
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    raise exceptions.TestFail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise exceptions.TestFail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    raise exceptions.TestFail(
                        'Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    raise exceptions.TestFail(
                        'libguestfs-winsupport not in dependency')
                if 'qemu-kvm-rhev' in output:
                    raise exceptions.TestFail('qemu-kvm-rhev is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    raise exceptions.TestFail(
                        'Command "%s" succeeded unexpectedly' % (command % win_img))
            if checkpoint == 'no_dcpath':
                if not utils_v2v.check_log(output, ['--dcpath'], expect=False):
                    raise exceptions.TestFail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    raise exceptions.TestFail(
                        'Did not find log of saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    raise exceptions.TestFail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in result.stdout.lower():
                    raise exceptions.TestFail('Expect log not found: %s' %
                                              target_str)
            if checkpoint == 'print_source':
                check_source(result.stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    expect_output = open(params['example_file']).read().strip()
                    logging.debug(expect_output)
                    logging.debug(expect_output == result.stdout.strip())
                else:
                    raise exceptions.TestError('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)
                disk_check = process.run('qemu-img check %s' % img_path).stdout
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    raise exceptions.TestFail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if not utils_v2v.check_log(messages, [msg_content],
                                           expect=False):
                    raise exceptions.TestFail(
                        'Found "%s" in /var/log/messages' % msg_content)

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'without_ic':
                input_option = '-i %s %s' % (input_mode, vm_name)
            # Build network & bridge options to avoid network errors
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            raise exceptions.TestSkipError("Unsupported input mode: %s" %
                                           input_mode)
        else:
            raise exceptions.TestError("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise exceptions.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

        # Output more messages except in quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network',
                'empty_nic_source_bridge'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                raise exceptions.TestSkipError(
                    "Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host,
                                  user=user,
                                  port=22,
                                  password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format, see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        if hypervisor in ['esx', 'xen'] or input_mode in ['disk', 'libvirtxml']:
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            create_pool(user_pool=True,
                        pool_name='src_pool',
                        pool_target='v2v_src_pool')
            create_pool(user_pool=True)
            logging.debug(virsh.pool_list(uri='qemu:///session'))
            sh_install_vm = params.get('sh_install_vm')
            if not sh_install_vm:
                raise exceptions.TestError(
                    'Source vm installing script missing')
            process.run('su - %s -c %s' % (v2v_user, sh_install_vm))

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd

        if checkpoint in ['dependency', 'no_dcpath']:
            cmd = params.get('check_command')

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))
        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
                    cleanup_pool(user_pool=True)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
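
The unprivileged-session variants above funnel every libvirt call through su. A condensed sketch of that pattern, assuming the avocado.utils process module used in these examples; the user name and command are placeholders:

# Editor's sketch of the "run as unprivileged user" pattern used above;
# the user name and the wrapped command are placeholders.
from avocado.utils import process

v2v_user = "v2vuser"
su_cmd = "su - %s -c " % v2v_user

cmd = su_cmd + "'virsh -c qemu:///session list --all'"
process.system(cmd, verbose=True, ignore_status=True)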
Esempio n. 33
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        dest_path = os.path.join(data_dir.get_data_dir(), "copy")

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define',
                             'undefine', 'crash', 'device-removal-failed',
                             'watchdog', 'io-error']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            #Shutdown reason distinguished from qemu_2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, new_disk, target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "blockcommit":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs)
                    snapshot_path = dom.get_blk_devices()['vda']['source']
                    virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % snapshot_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % disk_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda completed")
                    os.unlink(snapshot_path)
                elif event == "blockcopy":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    dom.undefine()
                    virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % disk_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % dest_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda completed")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name, dimm_xml.xml,
                                        flagstr="--config", **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                    expected_events_list.append("'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # watchdog acts slowly, waiting for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                    if action == 'pause':
                        expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail("Domain state should still be paused due to I/O error!")
                else:
                    test.error("Unsupported event: %s" % event)
                # The event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
            if os.path.exists(dest_path):
                os.unlink(dest_path)
        return [(dom.name, event) for event in expected_events_list]
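The templates returned above keep a '%s' placeholder for the domain name, and the io-error entries already contain regex-escaped fragments. The following is a minimal, standalone sketch (an assumption for illustration, not part of the original test) of how such (domain, template) pairs could be matched against captured "virsh event --loop --all" output.

import re


def find_missing_events(expected_pairs, event_output):
    """Return the expected event patterns that never appear in event_output.

    expected_pairs is a list of (domain_name, template) tuples like the one
    returned above; event_output is the captured text of 'virsh event'.
    """
    missing = []
    for dom_name, template in expected_pairs:
        # Substitute the domain name only when the template carries a placeholder.
        pattern = template % dom_name if '%s' in template else template
        if not re.search(pattern, event_output):
            missing.append(pattern)
    return missing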
Esempio n. 34
0
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            raise error.TestFail("VM doesn't use transparent"
                                                 " hugepage, while static"
                                                 " hugepage is disabled")
    finally:
        # Close all guest sessions
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
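The cleanup above mirrors a setup step that mounts hugetlbfs only when it is not already mounted and records the original state. A small self-contained sketch of that pairing, using only /proc/mounts and the utils_misc.mount/umount calls shown in this example (the helper names are assumptions):

from virttest import utils_misc


def hugetlbfs_mounted(mount_point="/dev/hugepages"):
    """Check /proc/mounts for an existing hugetlbfs mount on mount_point."""
    with open("/proc/mounts") as mounts:
        for line in mounts:
            fields = line.split()
            if (len(fields) >= 3 and fields[1] == mount_point
                    and fields[2] == "hugetlbfs"):
                return True
    return False


def ensure_hugetlbfs(tlbfs_enable, mount_point="/dev/hugepages"):
    """Mount or unmount hugetlbfs as requested; return the original state."""
    original_state = hugetlbfs_mounted(mount_point)
    if tlbfs_enable and not original_state:
        utils_misc.mount("hugetlbfs", mount_point, "hugetlbfs")
    elif not tlbfs_enable and original_state:
        utils_misc.umount("hugetlbfs", mount_point, "hugetlbfs")
    return original_state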
Esempio n. 35
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    V2V_UNSUPPORT_GLANCE_VER = "[virt-v2v-1.45.2-1.el9,)"

    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    version_required = params.get("version_required")
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    input_file = params.get("input_file")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    source_user = params.get("username", "root")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')
    error_flag = 'strict'
    estimate_file = ''

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir -p %s'" % target_path
            process.system(cmd, verbose=True)
            # Sometimes pool_create_as returns success, but the pool cannot
            # be found in the user session.
            virsh.pool_create_as(pool_name,
                                 'dir',
                                 target_path,
                                 unprivileged_user=v2v_user,
                                 debug=True)

            res = virsh.pool_info(pool_name,
                                  unprivileged_user=v2v_user,
                                  debug=True)
            if res.exit_status != 0:
                return False
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        return True

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            virsh.pool_destroy(pool_name,
                               unprivileged_user=v2v_user,
                               debug=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"RHV: (?:disk:|will export \S+ to) (\S+)",
                                output)
        if len(tmp_target) < 1:
            test.error("Fail to find tmp target file name when converting vm"
                       " disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = to_text(
                process.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir)))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            LOG.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            LOG.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        LOG.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    LOG.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    LOG.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                LOG.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            LOG.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image is created if the convert command uses the --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            LOG.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            LOG.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            LOG.debug('vm_snapshot_id = %s', snapshot_id)
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists only of "0"s')
        else:
            test.fail('Fail to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        LOG.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks',
            'NICs'
        ]
        if hypervisor in ['kvm', 'xen']:
            checklist.extend(['display', 'CPU features'])
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        v2v_virsh = None
        close_virsh = False
        if hypervisor == 'kvm':
            v2v_virsh = virsh
        else:
            virsh_dargs = {
                'uri': ic_uri,
                'remote_ip': remote_host,
                'remote_user': source_user,
                'remote_pwd': source_pwd,
                'auto_close': True,
                'debug': True
            }
            v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
            LOG.debug('a new virsh session %s was created', v2v_virsh)
            close_virsh = True

        # Check single values
        fail = []
        try:
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
        finally:
            if close_virsh:
                LOG.debug('virsh session %s is closing', v2v_virsh)
                v2v_virsh.close_session()

        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'

        if hypervisor in ['kvm', 'xen']:
            check_map['display'] = xml.get_graphics_devices()[0].type_name

        LOG.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            LOG.info('%-15s:%18s <-> %s', key, source_info[key],
                     check_map[key])
            if str(check_map[key]) not in source_info[key]:
                fail.append(key)

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        LOG.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        if hypervisor in ['kvm', 'xen']:
            feature_list = xml.features.get_feature_list()
            LOG.info('CPU features:%s<->%s', source_info['CPU features'],
                     feature_list)
            if sorted(source_info['CPU features'].split(',')) != sorted(
                    feature_list):
                fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v',
                               verbose=False).stdout_text.strip()
        if in_man:
            LOG.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            LOG.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_print_estimate(estimate_file):
        """
        Check that the disk sizes in the estimate file created by v2v sum to the total size
        """
        import json

        content = None
        buf = ''
        with open(estimate_file) as fp:
            all_content = fp.read()
            fp.seek(0)
            for line in fp:
                buf += line
                if '}' not in line:
                    continue
                if 'disks' in buf and 'total' in buf:
                    content = json.loads(buf)
                    break
                buf = ''

        LOG.debug('json file content:\n%s' % all_content)
        if not content or sum(content['disks']) != content['total']:
            test.fail("The disks' size doesn't same as total value")

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint in ['length_of_error', 'line_no_wrap']:
                log_lines = output.split('\n')
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    else:
                        v2v_start = False
                    # 76 is the max length in v2v
                    if v2v_start:
                        if checkpoint == 'length_of_error' and len(line) > 76:
                            test.fail(
                                'Error log longer than 76 characters: %s' %
                                line)
                        if checkpoint == 'line_no_wrap' and len(line) < 76:
                            test.fail(
                                'Error log is shorter than 76 characters: %s' %
                                line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                LOG.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                LOG.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root:
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    vmchecker = VMChecker(test, params, env)
                    params['vmchecker'] = vmchecker
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" success' % command % win_img)
            # check 'yum deplist virt-v2v'
            if checkpoint == 'deplist':
                if 'platform-python' not in output:
                    test.fail('platform-python is not in dependency')
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Not find log of saving overlays')
                overlay_path = search.group(1).strip()
                LOG.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    LOG.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                LOG.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                if utils_v2v.multiple_versions_compare(
                                        V2V_UNSUPPORT_GLANCE_VER
                                ) and 'glance' in line:
                                    continue
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                LOG.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support(
                )
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                LOG.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                LOG.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                LOG.info('Content of /var/log/messages during conversion:')
                LOG.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
            if checkpoint == 'print_estimate_tofile':
                check_print_estimate(estimate_file)
            if checkpoint == 'copy_to_local':
                vm_disk = vm_name + '-disk1'
                vm_xml_name = vm_name + '.xml'
                check_result = False
                if os.path.exists(vm_disk) and os.path.exists(vm_xml_name):
                    check_result = True
                for i in [vm_disk, vm_xml_name]:
                    if os.path.isfile(i):
                        os.remove(i)
                if not check_result:
                    test.fail(
                        'Not found disk or xml created by virt-v2v-copy-to-local'
                    )

        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if version_required and not utils_v2v.multiple_versions_compare(
                version_required):
            test.cancel("Testing requires version: %s" % version_required)

        if hypervisor == "xen":
            # See man virt-v2v-input-xen(1)
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            LOG.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            LOG.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            # Remote libvirt connection is not officially supported by
            # v2v and may fail. Just use localhost to simulate a remote
            # connection to test the warnings.
            if checkpoint == 'remote_libvirt_conn':
                ic_uri = 'qemu+ssh://localhost/system'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
                input_option = "-i ova %s -ic %s -of qcow2" % (input_file,
                                                               ic_uri)
            if checkpoint == 'without_ic':
                input_option = "-i ova %s -of raw" % input_file
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                LOG.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s" % output_mode
            if output_mode != 'null':
                output_option += " -os %s" % output_storage
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
            if checkpoint in ['with_ic', 'without_ic']:
                output_option = output_option.replace('v2v_dir', 'src_pool')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

            if output_mode == 'rhev':
                # create different sasl_user name for different job
                params.update({
                    'sasl_user':
                    params.get("sasl_user") +
                    utils_misc.generate_random_string(3)
                })
                LOG.info('sasl user name is %s' % params.get("sasl_user"))

                user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                               params.get("sasl_pwd"))
                v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
                v2v_sasl.server_ip = params.get("remote_ip")
                v2v_sasl.server_user = params.get('remote_user')
                v2v_sasl.server_pwd = params.get('remote_pwd')
                v2v_sasl.setup(remote=True)
                LOG.debug('A SASL session %s was created', v2v_sasl)

        # Output more messages except in quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network', 'line_no_wrap',
                'empty_nic_source_bridge', 'machine_readable'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
            new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            LOG.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                LOG.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            source_pwd = passwd = params.get("xen_host_passwd", "redhat")
            LOG.info("set up ssh-agent access ")
            xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
                remote_host, user, passwd, auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                LOG.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format; see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            source_pwd = vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "vpx_passwd")
            LOG.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " -ip %s" % vpx_passwd_file
            # The rhel8 slow stream doesn't support the '-ip' option yet,
            # so use the '--password-file' option instead.
            if not utils_v2v.v2v_supported_option("-ip <filename>"):
                output_option = output_option.replace('-ip', '--password-file',
                                                      1)

        # If no output option is specified for virt-v2v, the 'default' pool
        # will be used.
        if output_mode is None:
            # Cleanup first to avoid failure if 'default' pool exists.
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        # Create libvirt dir pool
        if output_mode == "libvirt":
            utils_misc.wait_for(create_pool, timeout=30, step=3)

        # Workaround until the bug is fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            utils_misc.wait_for(
                lambda: create_pool(user_pool=True,
                                    pool_name='src_pool',
                                    pool_target='v2v_src_pool'),
                timeout=30,
                step=3)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'vmx_ssh':
            esx_user = params.get("esx_host_user", "root")
            esx_pwd = params.get("esx_host_passwd")
            vmx = params.get('vmx')
            esx_pubkey, esx_session = utils_v2v.v2v_setup_ssh_key(
                esx_ip, esx_user, esx_pwd, server_type='esx', auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            input_option = '-i vmx -it ssh %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'simulate_nfs':
            simulate_images = params.get("simu_images_path")
            simulate_vms = params.get("simu_vms_path")
            simulate_dom_md = params.get("simu_dom_md_path")
            os.makedirs(simulate_images)
            os.makedirs(simulate_vms)
            process.run('touch %s' % simulate_dom_md)
            process.run('chmod -R 777 /tmp/rhv/')

        if checkpoint == 'print_estimate_tofile':
            estimate_file = utils_misc.generate_tmp_file_name(
                'v2v_print_estimate')
            v2v_options += " --machine-readable=file:%s" % estimate_file

        if checkpoint == 'remote_libvirt_conn':
            # Add localhost to known_hosts
            cmd = 'ssh-keyscan -t ecdsa localhost >> ~/.ssh/known_hosts'
            process.run(cmd, shell=True)
            # Setup remote login without password
            public_key = ssh_key.get_public_key().rstrip()
            cmd = 'echo "%s" >> ~/.ssh/authorized_keys' % public_key
            process.run(cmd, shell=True)

        if checkpoint == 'length_of_error' and utils_v2v.v2v_supported_option(
                '--wrap'):
            v2v_options += ' --wrap'

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
        if v2v_user:
            cmd_export_env = 'export LIBGUESTFS_BACKEND=direct'
            cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd)

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')
            # Set error_flag to 'replace' to avoid a decode exception for RHEL7-84978
            if "guestfish" in cmd:
                error_flag = "replace"

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))

        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        if checkpoint == "weak_dendency":
            utils_package.package_install(['libguestfs-xfs', 'virt-v2v'])
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    LOG.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode is None:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag and params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if new_v2v_user:
            process.system("userdel -fr %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
        if checkpoint == 'vmx_ssh':
            utils_v2v.v2v_setup_ssh_key_cleanup(esx_session, esx_pubkey, 'esx')
            process.run("ssh-agent -k")
        if checkpoint == 'simulate_nfs':
            process.run('rm -rf /tmp/rhv/')
        if os.path.exists(estimate_file):
            os.remove(estimate_file)
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; the testing environment is
            # not expected to use any other value by default.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        if checkpoint == 'remote_libvirt_conn':
            cmd = r"sed -i '/localhost/d' ~/.ssh/known_hosts"
            process.run(cmd, shell=True, ignore_status=True)
            if locals().get('public_key'):
                key = public_key.rstrip().split()[1].split('/')[0]
                cmd = r"sed -i '/%s/d' ~/.ssh/authorized_keys" % key
                process.run(cmd, shell=True, ignore_status=True)
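The test above mounts the NFS export domain with utils_misc.mount() and unmounts it in the finally block. A short standalone sketch (an assumption, not code from the test) that wraps the same pairing in a context manager so the cleanup runs even when the conversion fails:

import contextlib
import os

from virttest import utils_misc


@contextlib.contextmanager
def nfs_mounted(nfs_storage, mnt_point):
    """Mount an NFS export on mnt_point and always unmount it on exit."""
    if not os.path.isdir(mnt_point):
        os.makedirs(mnt_point)
    if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
        raise RuntimeError("Mount %s on %s failed" % (nfs_storage, mnt_point))
    try:
        yield mnt_point
    finally:
        utils_misc.umount(nfs_storage, mnt_point, "nfs")
        os.rmdir(mnt_point)

# Usage sketch (names are hypothetical):
# with nfs_mounted(nfs_storage, mnt_point):
#     process.run(v2v_cmd, timeout=v2v_timeout, ignore_status=True)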
Esempio n. 36
0
def run_numa_stress(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    most_used_node, memory_used = utils_test.max_mem_map_node(
        host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Don't have enough memory to execute this "
                                  "test after %s round" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                raise error.TestFail("Can not allocate memory in node %s."
                                     " Error message:%s" %
                                     (numa_node_malloc, str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run_autotest_control(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = utils_test.max_mem_map_node(
            host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory still stick in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
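In the example above, the 'perm' argument of utils_misc.mount() carries the tmpfs mount options, here a size= limit sized to the largest NUMA node. A minimal sketch of that call pattern in isolation (helper name and error handling are assumptions):

import os

from virttest import utils_misc


def mount_sized_tmpfs(tmpfs_path, size_kb):
    """Mount a tmpfs limited to size_kb kilobytes at tmpfs_path."""
    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)
    # "none" is the conventional pseudo source device for tmpfs mounts;
    # the mount options string is passed through the perm argument.
    if not utils_misc.mount("none", tmpfs_path, "tmpfs",
                            perm="size=%dK" % size_kb):
        raise RuntimeError("Failed to mount tmpfs on %s" % tmpfs_path)

# Cleanup mirrors the example:
# utils_misc.umount("none", tmpfs_path, "tmpfs")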
Esempio n. 37
0
    def mount(self):
        """
        Mount source into given mount point.
        """
        return utils_misc.mount(self.mount_src, self.mount_dir, "nfs",
                                perm=self.mount_options)
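A matching unmount step is the natural counterpart of the helper above; this is a sketch under the assumption that the object keeps the same mount_src/mount_dir attributes (not taken from the source):

    def umount(self):
        """
        Unmount the NFS source from the mount point set up by mount().
        """
        return utils_misc.umount(self.mount_src, self.mount_dir, "nfs")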
Esempio n. 38
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    source_user = params.get("username", "root")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')
    error_flag = 'strict'

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            test.error("Fail to find tmp target file name when converting vm"
                       " disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = to_text(
                process.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir)))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image is created if the convert command uses the --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists only of "0"s')
        else:
            test.fail('Fail to find snapshot_id')

    def setup_esx_ssh_key(hostname, user, password, port=22):
        """
        Set up remote login on the esx server by using a public key
        """
        logging.debug('Performing SSH key setup on %s:%d as %s.' %
                      (hostname, port, user))
        try:
            session = remote.remote_login(client='ssh',
                                          host=hostname,
                                          username=user,
                                          port=port,
                                          password=password,
                                          prompt=r'[ $#%]')
            public_key = ssh_key.get_public_key()
            session.cmd("echo '%s' >> /etc/ssh/keys-root/authorized_keys; " %
                        public_key)
            logging.debug('SSH key setup complete.')
            session.close()
        except Exception as err:
            logging.debug('SSH key setup has failed. %s', err)

    def check_source(output):
        """
        Check if the --print-source option prints the correct info
        """
        # Parse source info
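        # The --print-source output is parsed as 'key: value' lines;
        # continuation lines start with a tab and are folded back into the
        # previous key (format assumed from the handling below).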
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks',
            'NICs'
        ]
        if hypervisor in ['kvm', 'xen']:
            checklist.extend(['display', 'CPU features'])
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        v2v_virsh = None
        close_virsh = False
        if hypervisor == 'kvm':
            v2v_virsh = virsh
        else:
            virsh_dargs = {
                'uri': ic_uri,
                'remote_ip': remote_host,
                'remote_user': source_user,
                'remote_pwd': source_pwd,
                'debug': True
            }
            v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
            close_virsh = True

        # Check single values
        fail = []
        try:
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
        finally:
            if close_virsh:
                v2v_virsh.close_session()

        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'

        if hypervisor in ['kvm', 'xen']:
            check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if str(check_map[key]) not in source_info[key]:
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all().values())[0]

        def _get_disk_subelement_attr_value(obj, attr, subattr):
            if obj.find(attr) is not None:
                return obj.find(attr).get(subattr)

        bus = _get_disk_subelement_attr_value(disk, 'target', 'bus')
        driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type')
        path = _get_disk_subelement_attr_value(disk, 'source', 'file')

        # For esx, disk output is like "disks: json: { ... } (raw) [scsi]"
        # For xen, disk output is like "disks: json: { ... } [ide]"
        # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]"
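        # Illustrative (not from a real run): for kvm, with
        # path='/var/lib/libvirt/images/rhel8.0-2.qcow2', driver_type='qcow2'
        # and bus='virtio', disks_info_pattern becomes
        #   /var/lib/libvirt/images/rhel8.0-2.qcow2 \(qcow2\) \[virtio
        # which re.search() below matches against source_info['disks'].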
        if hypervisor == 'kvm':
            disks_info_pattern = "%s \(%s\) \[%s" % (path, driver_type, bus)
        elif hypervisor == 'esx':
            # replace '.vmdk' with '-flat.vmdk', this is done in v2v
            path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk')
            # In newer qemu version, '_' is replaced with '%5f'.
            path_pattern2 = path_pattern1.replace('_', '%5f')
            # For esx, '(raw)' seems to be fixed; revisit if other formats
            # show up.
            disks_info_pattern = '|'.join([
                "https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s"
                % (remote_host, i, bus)
                for i in [path_pattern1, path_pattern2]
            ])
        elif hypervisor == 'xen':
            disks_info_pattern = "file\.path.*%s.*file\.host.*%s.* \[%s" % (
                path, remote_host, bus)

        source_disks = source_info['disks'].split()
        logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern)
        if not re.search(disks_info_pattern, source_info['disks']):
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        nic_type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(nic_type)
        nic_info = '%s "%s" mac: %s' % (nic_type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        if hypervisor in ['kvm', 'xen']:
            feature_list = xml.features.get_feature_list()
            logging.info('CPU features:%s<->%s', source_info['CPU features'],
                         feature_list)
            if sorted(source_info['CPU features'].split(',')) != sorted(
                    feature_list):
                fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v',
                               verbose=False).stdout_text.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        test.fail('Error log longer than 72 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Too much output in quiet mode '
                              '(more than 10 lines)')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" success' % command % win_img)
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Did not find log of saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                test.fail(
                                    '%s not in --machine-readable output' %
                                    line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                qemu_img_locking_feature_support = \
                    libvirt_storage.check_qemu_image_lock_support()
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
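                # 'qemu-img check' is assumed to end its summary with a field
                # like '..., N% compressed clusters'; the parsing below takes
                # the last comma-separated field and reads the number in front
                # of the '%'.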
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
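            # e.g. for checkpoint 'empty_nic_source_network' this turns the
            # first interface into type='network' with an empty
            # <source network=''/> element.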
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'without_ic':
                input_option = '-i %s %s' % (input_mode, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

        # Output more messages except in quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network',
                'empty_nic_source_bridge', 'machine_readable'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            source_pwd = passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host,
                                  user=user,
                                  port=22,
                                  password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            source_pwd = vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # If no output option is specified, virt-v2v will use the 'default'
        # pool.
        if output_mode is None:
            # Cleanup first to avoid failure if 'default' pool exists.
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Workaround until the bug is fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            create_pool(user_pool=True,
                        pool_name='src_pool',
                        pool_target='v2v_src_pool')
            create_pool(user_pool=True)
            logging.debug(virsh.pool_list(uri='qemu:///session'))
            sh_install_vm = params.get('sh_install_vm')
            if not sh_install_vm:
                test.error('Source VM installation script is missing')
            with open(sh_install_vm) as fh:
                cmd_install_vm = fh.read().strip()
            process.run('su - %s -c "%s"' % (v2v_user, cmd_install_vm),
                        timeout=10,
                        shell=True)
            params['cmd_clean_vm'] = "%s 'virsh undefine %s'" % (su_cmd,
                                                                 vm_name)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'vmx_ssh':
            esx_user = params.get("esx_host_user", "root")
            esx_pwd = params.get("esx_host_passwd", "123qweP")
            vmx = params.get('vmx')
            setup_esx_ssh_key(esx_ip, esx_user, esx_pwd)
            try:
                utils_misc.add_identities_into_ssh_agent()
            except Exception:
                process.run("ssh-agent -k")
                raise exceptions.TestError("Fail to setup ssh-agent")
            input_option = '-i vmx -it ssh %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'simulate_nfs':
            simulate_images = params.get("simu_images_path")
            simulate_vms = params.get("simu_vms_path")
            simulate_dom_md = params.get("simu_dom_md_path")
            os.makedirs(simulate_images)
            os.makedirs(simulate_vms)
            process.run('touch %s' % simulate_dom_md)
            process.run('chmod -R 777 /tmp/rhv/')

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
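        # The assembled command looks roughly like (illustrative values only):
        #   virt-v2v -i libvirt -ic <uri> <vm> -o local -os <dir> -v -x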
        if v2v_user:
            cmd_export_env = 'export LIBGUESTFS_BACKEND=direct'
            cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd)

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')
            # Only relax the error handling (to 'replace') to avoid a decode
            # exception for RHEL7-84978
            if "guestfish" in cmd:
                error_flag = "replace"

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))
        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
                    cleanup_pool(user_pool=True)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode is None:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if checkpoint in ['with_ic', 'without_ic']:
            process.run(params['cmd_clean_vm'])
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
        if checkpoint == 'simulate_nfs':
            process.run('rm -rf /tmp/rhv/')
Esempio n. 39
0
def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
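    # tmpfs_size is in KB (node MemTotal) and tmpfs_write_speed in KB/s, so
    # dd_timeout below is the expected write time plus a 50% margin, e.g. a
    # 16 GiB node (16777216 KB) at the default 10240 KB/s gives ~1638 s,
    # ~2458 s with the margin; dd itself writes tmpfs_size KB (bs=1k).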
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Don't have enough memory to execute this "
                                  "test after %s round" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                raise error.TestFail("Can not allocate memory in node %s."
                                     " Error message:%s" % (numa_node_malloc,
                                                            str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory still stick in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
Esempio n. 40
0
    @param env: Dictionary with test environment.
    """
    mount_dest_dir = params.get('dst_dir', '/mnt/images')
    if not os.path.exists(mount_dest_dir):
        try:
            os.makedirs(mount_dest_dir)
        except OSError as err:
            logging.warning('mkdir %s error:\n%s', mount_dest_dir, err)

    if not os.path.exists(mount_dest_dir):
        raise error.TestError('Failed to create NFS share dir %s' %
                              mount_dest_dir)

    src = params.get('images_good')
    image = '%s.%s' % (os.path.split(
        params['image_name'])[1], params['image_format'])
    src_path = os.path.join(mount_dest_dir, image)
    dst_path = '%s.%s' % (params['image_name'], params['image_format'])
    cmd = 'cp %s %s' % (src_path, dst_path)
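    # Illustrative only: with image_name 'images/rhel7' and image_format
    # 'qcow2', src_path is '<mount_dest_dir>/rhel7.qcow2' on the NFS share
    # and dst_path is 'images/rhel7.qcow2'.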

    if not utils_misc.mount(src, mount_dest_dir, 'nfs', 'ro'):
        raise error.TestError('Could not mount NFS share %s to %s' %
                              (src, mount_dest_dir))

    # Check the existence of source image
    if not os.path.exists(src_path):
        raise error.TestError('Could not find %s in NFS share' % src_path)

    logging.debug('Copying image %s...', image)
    utils.system(cmd)
Esempio n. 41
0
    def restore(self, name):
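        # 'name' is expected to be a dict describing the saved mount state,
        # with keys 'src', 'mount_point', 'fstype' and 'options' (as read
        # below).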
        info = name
        if not mount(info['src'], info['mount_point'], info['fstype'],
                     info['options'], verbose=False):
            raise Exception("Failed to mount %s" % info['mount_point'])
Esempio n. 42
0
def run(test, params, env):
    """
    Test when the PCI configuration file is in read-only mode
    """
    def test_vf_hotplug():
        """
        Hot-plug VF to VM

        """
        logging.info("Preparing a running guest...")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=180)

        logging.info("Attaching VF to the guest...")
        mac_addr = utils_net.generate_mac_address_simple()
        iface_dict = eval(
            params.get('iface_dict', '{"hostdev_addr": "%s"}') %
            utils_sriov.pci_to_addr(vf_pci))
        iface = interface.Interface("hostdev")
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        virsh.attach_device(vm_name,
                            iface.xml,
                            debug=True,
                            ignore_status=False)

        logging.info("Checking VF in the guest...")
        vm_iface_types = [
            iface.get_type_name() for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        if 'hostdev' not in vm_iface_types:
            test.fail('Unable to get hostdev interface!')
        if cmd_in_vm:
            if not utils_misc.wait_for(
                    lambda: not vm_session.cmd_status(cmd_in_vm), 30, 10):
                test.fail("Can not get the Virtual Function info on vm!")
        vm_session.close()

    libvirt_version.is_libvirt_feature_supported(params)

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    cmd_in_vm = params.get("cmd_in_vm")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    default_vf = sriov_base.setup_vf(pf_pci, params)
    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    dev_name = utils_sriov.get_device_name(vf_pci)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd('virtqemud')
    try:
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        logging.info("Re-mounting sysfs with ro mode...")
        utils_misc.mount('/sys', '', None, 'remount,ro')
        libvirtd.restart()
        run_test()
    finally:
        logging.info("Recover test enviroment.")
        utils_misc.mount('/sys', '', None, 'remount,rw')
        sriov_base.recover_vf(pf_pci, params, default_vf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        virsh.nodedev_reattach(dev_name, debug=True)