Example 1
        def _when_target_libvirt(has_qxldod):
            # Since RHEL7.3 (virt-v2v-1.32.1-1.el7), the video model changes to
            # QXL for Linux VMs
            if self.os_type == 'linux':
                video_model = 'qxl'
            elif self.os_version in ['win7', 'win2008r2']:
                video_model = 'qxl'
            # win11 and win2022 don't have the qxldod driver in virtio-win, but
            # v2v unexpectedly inspects them as win10 and win2016, so the qxl
            # driver is installed by mistake. This only affects RHEL8.
            elif self.os_version in [
                    'win10', 'win2016', 'win2019', 'win11', 'win2022'
            ] and has_qxldod:
                video_model = 'qxl'
            else:
                video_model = 'cirrus'

            # On the slow train, video is set to 'cirrus' for Windows guests.
            v2v_av_version = '[virt-v2v-1.42,)'
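            # Note: '[virt-v2v-1.42,)' presumably denotes a half-open version
            # interval, i.e. installed virt-v2v >= 1.42.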
            if self.os_type == 'windows' and not utils_v2v.multiple_versions_compare(
                    v2v_av_version):
                LOG.debug('video is changed because of slow train')
                video_model = 'cirrus'

            return video_model
Example 2
def check_json_output(params):
    """
    Check -o json result
    """
    LOG.info('checking json output')

    os_directory = params.get('os_directory')
    disk_count = int(params.get('vm_disk_count', 0))
    vm_name = params.get('main_vm')
    json_disk_pattern = params.get('json_disk_pattern')

    result = True

    json_disk_dict = {'GuestName': vm_name, 'DiskDeviceName': '', 'DiskNo': 0}

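    # The substitutions below presumably turn v2v-style '%{Name}' placeholders
    # in json_disk_pattern into Python str.format() fields that are filled in
    # from json_disk_dict.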
    if json_disk_pattern:
        json_disk_pattern = json_disk_pattern.replace('%{', '{')
        json_disk_pattern = re.sub(r'%{(.*?)}', r'%%{{\g<1>}}',
                                   json_disk_pattern)

    # Checking all disks
    for i, c in enumerate(string.ascii_lowercase):
        if i == disk_count:
            break

        json_disk_dict.update({'DiskDeviceName': 'sd%s' % c})
        json_disk_dict.update({'DiskNo': '%d' % (i + 1)})

        disk_file_name = "%s-%s" % (vm_name, 'sd%s' % c)
        if json_disk_pattern:
            disk_file_name = json_disk_pattern.format(**json_disk_dict)
        disk_file = os.path.join(os_directory, disk_file_name)
        if not os.path.exists(disk_file):
            LOG.error('Not found %s' % disk_file)
            result = False

    # Check json file
    json_file = os.path.join(os_directory, '%s.json' % vm_name)
    if not os.path.exists(json_file):
        LOG.error('Not found %s' % json_file)
        return False

    # Check content of the json file
    with open(json_file) as fp:
        vm = json.load(fp)
        if vm['name'] != vm_name or len(vm['disks']) != disk_count:
            LOG.error('Verify content failed in %s' % json_file)
            result = False

        if utils_v2v.multiple_versions_compare(
                V2V_ADAPTE_SPICE_REMOVAL_VER
        ) and vm['guestcaps']['video'] != 'vga':
            LOG.error('Verify video failed: actual value is %s' %
                      vm['guestcaps']['video'])
            result = False

    return result
Example 3
 def get_expect_graphic_type(self):
     """
     The graphics type in the VM XML differs for different targets.
     """
     # 'ori_graphic' can only be set when the hypervisor is KVM. For Xen and
     # ESX it is always None, so 'vnc' is used by default.
     graphic_type = self.params.get('ori_graphic', 'vnc')
     if utils_v2v.multiple_versions_compare(V2V_ADAPTE_SPICE_REMOVAL_VER):
         graphic_type = 'vnc'
     elif self.target == 'ovirt':
         graphic_type = 'spice'
     return graphic_type
Example 4
def check_qxl_warning(buf, os_type, os_version):
    """
    Check the qxl warning message
    :param buf: the v2v debug log
    :param os_type: the guest's os_type(linux, windows)
    :param os_version: the guest's os version
    """
    err_list = []
    virtio_win_ver = "[virtio-win-1.9.16-1,)"
    virtio_win_qxl_os = ['win2008r2', 'win7']
    virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']
    unexpected_qxldod_os = ['win11', 'win2022']
    v2v_unsupport_qxl_ver = "[virt-v2v-1.45.96,)"

    # RHEL9 doesn't support qxl.
    if utils_v2v.multiple_versions_compare(v2v_unsupport_qxl_ver):
        return err_list

    # Skip the qxl check for win11 and win2022: virtio-win doesn't ship a qxl
    # driver for them, but because of bugs in libguestfs they are unexpectedly
    # inspected as win10 and win2016, so the qxl driver is installed by
    # mistake. The warning check can be skipped here; the warning is not that
    # important either.
    if os_type == 'windows' and os_version not in unexpected_qxldod_os:
        virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
            virtio_win_ver)
        qxl_warning = "there is no QXL driver for this version of Windows"
        if virtio_win_support_qxldod and os_version in virtio_win_qxldod_os:
            has_qxl_warning = False
        elif os_version in virtio_win_qxl_os:
            has_qxl_warning = False
        else:
            has_qxl_warning = True

        if has_qxl_warning and not re.search(qxl_warning, buf):
            err_list.append('QXL warning not found')
        if not has_qxl_warning and re.search(qxl_warning, buf):
            err_list.append('Unexpected QXL warning')

    return err_list
Example 5
    def get_expect_video_model(self):
        """
        The video model in the VM XML differs depending on the situation.
        """
        def _when_target_libvirt(has_qxldod):
            # Since RHEL7.3 (virt-v2v-1.32.1-1.el7), the video model changes to
            # QXL for Linux VMs
            if self.os_type == 'linux':
                video_model = 'qxl'
            elif self.os_version in ['win7', 'win2008r2']:
                video_model = 'qxl'
            # win11 and win2022 don't have the qxldod driver in virtio-win, but
            # v2v unexpectedly inspects them as win10 and win2016, so the qxl
            # driver is installed by mistake. This only affects RHEL8.
            elif self.os_version in [
                    'win10', 'win2016', 'win2019', 'win11', 'win2022'
            ] and has_qxldod:
                video_model = 'qxl'
            else:
                video_model = 'cirrus'

            # On the slow train, video is set to 'cirrus' for Windows guests.
            v2v_av_version = '[virt-v2v-1.42,)'
            if self.os_type == 'windows' and not utils_v2v.multiple_versions_compare(
                    v2v_av_version):
                LOG.debug('video is changed because of slow train')
                video_model = 'cirrus'

            return video_model

        def _when_target_ovirt():
            # Video model will change to QXL if convert target is ovirt/RHEVM
            return 'qxl'

        # Default value
        video_model = 'cirrus'
        has_virtio_win, has_qxldod = self.get_virtio_win_config()
        # Video model will change to QXL if convert target is ovirt/RHEVM
        if self.target == 'ovirt':
            video_model = _when_target_ovirt()
        # Video model will change to QXL for Windows2008r2 and windows7
        if self.target == 'libvirt':
            video_model = _when_target_libvirt(has_qxldod)
        if not has_virtio_win:
            video_model = 'cirrus'

        if utils_v2v.multiple_versions_compare(V2V_ADAPTE_SPICE_REMOVAL_VER):
            video_model = 'vga'

        return video_model
Example 6
 def check_rhev_file_exist(vmcheck):
     """
     Check if rhev files exist
     """
     file_path = {
         'rhev-apt.exe': r'C:\rhev-apt.exe',
         'rhsrvany.exe':
         r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
     }
     # rhev-apt.exe is removed on rhel9
     if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER):
         file_path.pop('rhev-apt.exe')
     for key in file_path:
         status = vmcheck.session.cmd_status('dir %s' % file_path[key])
         if status == 0:
             logging.info('%s exists' % key)
         else:
             log_fail('%s does not exist after convert to rhv' % key)
Example 7
def run(test, params, env):
    """
    Basic nbdkit tests
    """
    checkpoint = params.get('checkpoint')
    version_requried = params.get('version_requried')

    def test_filter_stats_fd_leak():
        """
        check if nbdkit-stats-filter leaks an fd
        """
        tmp_logfile = os.path.join(data_dir.get_tmp_dir(), "nbdkit-test.log")
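        # The nbdkit sh-plugin script below dumps the script process's open
        # fds to tmp_logfile during the pread callback, so any pipe/socket
        # fds leaked by the stats filter can be counted afterwards.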
        cmd = """
nbdkit -U - --filter=log --filter=stats sh - \
  logfile=/dev/null statsfile=/dev/null \
  --run 'qemu-io -r -f raw -c "r 0 1" $nbd' <<\EOF
  case $1 in
    get_size) echo 1m;;
    pread) ls -l /proc/$$/fd > %s
      dd iflag=count_bytes count=$3 if=/dev/zero || exit 1 ;;
    *) exit 2 ;;
  esac
EOF
""" % tmp_logfile

        process.run(cmd, shell=True)

        count = 0
        with open(tmp_logfile) as fd:
            ptn = r'(\d+)\s+->\s+\'(pipe|socket):'
            cont = fd.read()
            logging.debug('all fds:\n%s', cont)
            for i, _ in re.findall(ptn, cont):
                if int(i) > 2:
                    count += 1
        if count > 0:
            test.fail('nbdkit-stats-filter leaks %d fd' % count)

    if version_requried and not multiple_versions_compare(version_requried):
        test.cancel("Testing requires version: %s" % version_requried)
    if checkpoint == 'filter_stats_fd_leak':
        test_filter_stats_fd_leak()
    else:
        test.error('Not found testcase: %s' % checkpoint)
Example 8
    def get_virtio_win_config(self):
        """
        Return a value pair (virtio_win_installed, virtio_win_support_qxldod).
        If virtio_win_installed is true, virtio-win is installed or VIRTIO_WIN
        is set.
        If virtio_win_support_qxldod is true, qxldod drivers are included.
        """
        # see bz1902635
        virtio_win_ver = "[virtio-win-1.9.16,)"
        virtio_win_installed = os.path.exists(
            '/usr/share/virtio-win/virtio-win.iso')
        # virtio-win is not installed, but VIRTIO_WIN is set
        virtio_win_env = os.getenv('VIRTIO_WIN')
        virtio_win_support_qxldod = False

        if not virtio_win_installed:
            if virtio_win_env:
                # Users should ensure VIRTIO_WIN is valid
                virtio_win_installed = True
                if os.path.isdir(virtio_win_env):
                    virtio_win_iso_dir = virtio_win_env
                    qxldods = glob.glob(
                        "%s/**/qxldod.inf" %
                        virtio_win_iso_dir, recursive=True)
                else:
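                    # VIRTIO_WIN points at a file, presumably an ISO image;
                    # mount it on a temporary directory just long enough to
                    # search for qxldod.inf.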
                    with tempfile.TemporaryDirectory(prefix='v2v_helper_') as virtio_win_iso_dir:
                        process.run(
                            'mount %s %s' %
                            (virtio_win_env, virtio_win_iso_dir), shell=True)
                        qxldods = glob.glob(
                            "%s/**/qxldod.inf" %
                            virtio_win_iso_dir, recursive=True)
                        process.run(
                            'umount %s' %
                            (virtio_win_iso_dir),
                            shell=True)
                logging.debug('Found qxldods: %s', qxldods)
                if qxldods:
                    virtio_win_support_qxldod = True
        else:
            virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
                virtio_win_ver)

        return virtio_win_installed, virtio_win_support_qxldod
Example 9
        def _when_target_libvirt(has_qxldod):
            # Since RHEL7.3 (virt-v2v-1.32.1-1.el7), the video model changes to
            # QXL for Linux VMs
            if self.os_type == 'linux':
                video_model = 'qxl'
            elif self.os_version in ['win7', 'win2008r2']:
                video_model = 'qxl'
            elif self.os_version in ['win10', 'win2016', 'win2019'] and has_qxldod:
                video_model = 'qxl'
            else:
                video_model = 'cirrus'

            # On the slow train, video is set to 'cirrus' for Windows guests.
            v2v_av_version = '[virt-v2v-1.42,)'
            if self.os_type == 'windows' and not utils_v2v.multiple_versions_compare(
                    v2v_av_version):
                logging.debug('video is changed because of slow train')
                video_model = 'cirrus'

            return video_model
Example 10
    def test_memory_max_disk_size():
        """
        check case for bz1913740
        """
        if multiple_versions_compare('[qemu-kvm-5.2.0-3,)'):
            mem_size = "2**63 - 2**30"
            invalid_mem_size = "2**63 - 2**30 + 1"
        else:
            mem_size = "2**63 - 512"
            invalid_mem_size = "2**63 - 512 + 1"

        cmd = 'nbdkit memory $((%s)) --run \'qemu-img info "$uri"\'' % mem_size
        cmd_result = process.run(cmd, shell=True, ignore_status=True)
        if cmd_result.exit_status != 0:
            test.fail('failed to test memory_max_disk_size')

        cmd = 'nbdkit memory $((%s)) --run \'qemu-img info "$uri"\'' % invalid_mem_size
        expected_msg = 'File too large'
        cmd_result = process.run(cmd, shell=True, ignore_status=True)
        if cmd_result.exit_status == 0 or expected_msg in cmd_result.stdout_text:
            test.fail('failed to test memory_max_disk_size')
Example 11
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    version_requried = params.get("version_requried")
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    input_file = params.get("input_file")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    source_user = params.get("username", "root")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')
    error_flag = 'strict'
    estimate_file = ''

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir -p %s'" % target_path
            process.system(cmd, verbose=True)
            # Sometimes pool_create_as returns success, but the pool cannot
            # be found in the user session.
            virsh.pool_create_as(pool_name,
                                 'dir',
                                 target_path,
                                 unprivileged_user=v2v_user,
                                 debug=True)

            res = virsh.pool_info(pool_name,
                                  unprivileged_user=v2v_user,
                                  debug=True)
            if res.exit_status != 0:
                return False
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        return True

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            virsh.pool_destroy(pool_name,
                               unprivileged_user=v2v_user,
                               debug=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
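        # The convert target path is assumed to look like
        # <mnt_point>/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>;
        # with a two-component mnt_point, split('/') indices 3, 5 and 6
        # pick out the three uuids.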
        if len(tmp_target) < 1:
            test.error("Failed to find tmp target file name when converting"
                       " vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = to_text(
                process.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir)))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
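            # A UUID contains 32 hex digits, so 32 or more zeros means the
            # snapshot id is effectively the all-zero placeholder rather than
            # a real id.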
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists entirely of zeros')
        else:
            test.fail('Failed to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
        source = output.split('\n')[2:]
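        # Lines starting with a tab are continuations of the previous field;
        # merge them before splitting each line on ':'.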
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks',
            'NICs'
        ]
        if hypervisor in ['kvm', 'xen']:
            checklist.extend(['display', 'CPU features'])
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        v2v_virsh = None
        close_virsh = False
        if hypervisor == 'kvm':
            v2v_virsh = virsh
        else:
            virsh_dargs = {
                'uri': ic_uri,
                'remote_ip': remote_host,
                'remote_user': source_user,
                'remote_pwd': source_pwd,
                'auto_close': True,
                'debug': True
            }
            v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
            logging.debug('a new virsh session %s was created', v2v_virsh)
            close_virsh = True

        # Check single values
        fail = []
        try:
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
        finally:
            if close_virsh:
                logging.debug('virsh session %s is closing', v2v_virsh)
                v2v_virsh.close_session()

        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'

        if hypervisor in ['kvm', 'xen']:
            check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if str(check_map[key]) not in source_info[key]:
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all_by_expr('device==disk').values())[0]

        def _get_disk_subelement_attr_value(obj, attr, subattr):
            if obj.find(attr) is not None:
                return obj.find(attr).get(subattr)

        bus = _get_disk_subelement_attr_value(disk, 'target', 'bus')
        driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type')
        path = _get_disk_subelement_attr_value(disk, 'source', 'file')

        # For esx, disk output is like "disks: json: { ... } (raw) [scsi]"
        # For xen, disk output is like "disks: json: { ... } [ide]"
        # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]"
        if hypervisor == 'kvm':
            disks_info_pattern = r"%s \(%s\) \[%s" % (path, driver_type, bus)
        elif hypervisor == 'esx':
            # replace '.vmdk' with '-flat.vmdk', this is done in v2v
            path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk')
            # In newer qemu version, '_' is replaced with '%5f'.
            path_pattern2 = path_pattern1.replace('_', '%5f')
            # nbd:unix:/tmp/v2vnbdkit.u44G6C/nbdkit1.sock:exportname=/ (raw)
            # [scsi]
            path_pattern_nbd = r'nbd:unix:/.*? \(raw\) \[%s\]' % bus
            # For esx, '(raw)' seems to be fixed; revisit if other formats show up.
            disks_info_pattern = '|'.join([path_pattern_nbd] + [
                r"https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s"
                % (remote_host, i, bus)
                for i in [path_pattern1, path_pattern2]
            ])
        elif hypervisor == 'xen':
            disks_info_pattern = "file\.path.*%s.*file\.host.*%s.* \[%s" % (
                path, remote_host, bus)

        source_disks = source_info['disks'].split()
        logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern)
        if not re.search(disks_info_pattern, source_info['disks']):
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        if hypervisor in ['kvm', 'xen']:
            feature_list = xml.features.get_feature_list()
            logging.info('CPU features:%s<->%s', source_info['CPU features'],
                         feature_list)
            if sorted(source_info['CPU features'].split(',')) != sorted(
                    feature_list):
                fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v',
                               verbose=False).stdout_text.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_print_estimate(estimate_file):
        """
        Check disk size and total size in file of estimate created by v2v
        """
        import json

        content = None
        buf = ''
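        # The machine-readable file may contain more than one JSON object;
        # accumulate lines until a closing brace and parse the chunk that
        # carries both 'disks' and 'total'.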
        with open(estimate_file) as fp:
            all_content = fp.read()
            fp.seek(0)
            for line in fp:
                buf += line
                if '}' not in line:
                    continue
                if 'disks' in buf and 'total' in buf:
                    content = json.loads(buf)
                    break
                buf = ''

        logging.debug('json file content:\n%s' % all_content)
        if not content or sum(content['disks']) != content['total']:
            test.fail("Sum of disk sizes doesn't match the total value")

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    # 76 is the max length in v2v
                    if v2v_start and len(line) > 76:
                        test.fail('Error log longer than 76 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root:
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    vmchecker = VMChecker(test, params, env)
                    params['vmchecker'] = vmchecker
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" succeeded unexpectedly' %
                              command % win_img)
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Log of saving overlays not found')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                test.fail(
                                    '%s not in --machine-readable output' %
                                    line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support(
                )
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
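                # 'qemu-img check' ends with something like
                # '... 0.00% compressed clusters'; the percentage in the last
                # comma-separated field is treated as the compression rate.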
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
            if checkpoint == 'print_estimate_tofile':
                check_print_estimate(estimate_file)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requries version: %s" % version_requried)

        if hypervisor == "xen":
            # See man virt-v2v-input-xen(1)
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            # Remote libvirt connection is not officially supported by
            # v2v and may fail. Just use localhost to simulate a remote
            # connection to test the warnings.
            if checkpoint == 'remote_libvirt_conn':
                ic_uri = 'qemu+ssh://localhost/system'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
                input_option = "-i ova %s -ic %s -of qcow2" % (input_file,
                                                               ic_uri)
            if checkpoint == 'without_ic':
                input_option = "-i ova %s -of raw" % input_file
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s" % output_mode
            if output_mode != 'null':
                output_option += " -os %s" % output_storage
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
            if checkpoint in ['with_ic', 'without_ic']:
                output_option = output_option.replace('v2v_dir', 'src_pool')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

            if output_mode == 'rhev':
                # create different sasl_user name for different job
                params.update({
                    'sasl_user':
                    params.get("sasl_user") +
                    utils_misc.generate_random_string(3)
                })
                logging.info('sasl user name is %s' % params.get("sasl_user"))

                user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                               params.get("sasl_pwd"))
                v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
                v2v_sasl.server_ip = params.get("remote_ip")
                v2v_sasl.server_user = params.get('remote_user')
                v2v_sasl.server_pwd = params.get('remote_pwd')
                v2v_sasl.setup(remote=True)
                logging.debug('A SASL session %s was created', v2v_sasl)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network',
                'empty_nic_source_bridge', 'machine_readable'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
            new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            source_pwd = passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
                remote_host, user, passwd, auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format, see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            source_pwd = vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " -ip %s" % vpx_passwd_file
            # The rhel8 slow stream doesn't support the '-ip' option yet,
            # so use '--password-file' instead.
            if not utils_v2v.v2v_supported_option("-ip <filename>"):
                output_option = output_option.replace('-ip', '--password-file',
                                                      1)

        # If no output option is specified for virt-v2v, the 'default' pool
        # will be used.
        if output_mode is None:
            # Cleanup first to avoid failure if 'default' pool exists.
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        # Create libvirt dir pool
        if output_mode == "libvirt":
            utils_misc.wait_for(create_pool, timeout=30, step=3)

        # Work around until the bug is fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            utils_misc.wait_for(
                lambda: create_pool(user_pool=True,
                                    pool_name='src_pool',
                                    pool_target='v2v_src_pool'),
                timeout=30,
                step=3)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'vmx_ssh':
            esx_user = params.get("esx_host_user", "root")
            esx_pwd = params.get("esx_host_passwd")
            vmx = params.get('vmx')
            esx_pubkey, esx_session = utils_v2v.v2v_setup_ssh_key(
                esx_ip, esx_user, esx_pwd, server_type='esx', auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            input_option = '-i vmx -it ssh %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'simulate_nfs':
            simulate_images = params.get("simu_images_path")
            simulate_vms = params.get("simu_vms_path")
            simulate_dom_md = params.get("simu_dom_md_path")
            os.makedirs(simulate_images)
            os.makedirs(simulate_vms)
            process.run('touch %s' % simulate_dom_md)
            process.run('chmod -R 777 /tmp/rhv/')

        if checkpoint == 'print_estimate_tofile':
            estimate_file = utils_misc.generate_tmp_file_name(
                'v2v_print_estimate')
            v2v_options += " --machine-readable=file:%s" % estimate_file

        if checkpoint == 'remote_libvirt_conn':
            # Add localhost to known_hosts
            cmd = 'ssh-keyscan -t ecdsa localhost >> ~/.ssh/known_hosts'
            process.run(cmd, shell=True)
            # Setup remote login without password
            public_key = ssh_key.get_public_key().rstrip()
            cmd = 'echo "%s" >> ~/.ssh/authorized_keys' % public_key
            process.run(cmd, shell=True)

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
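        # For an unprivileged user, wrap the whole command in
        # "su - <user> -c" and export LIBGUESTFS_BACKEND inside that shell so
        # the setting survives the user switch.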
        if v2v_user:
            cmd_export_env = 'export LIBGUESTFS_BACKEND=direct'
            cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd)

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')
            # only set error_flag to 'replace' to avoid exceptions for RHEL7-84978
            if "guestfish" in cmd:
                error_flag = "replace"

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))
        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if mnt_point and os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode is None:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag and params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if new_v2v_user:
            process.system("userdel -fr %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
        if checkpoint == 'vmx_ssh':
            utils_v2v.v2v_setup_ssh_key_cleanup(esx_session, esx_pubkey, 'esx')
            process.run("ssh-agent -k")
        if checkpoint == 'simulate_nfs':
            process.run('rm -rf /tmp/rhv/')
        if os.path.exists(estimate_file):
            os.remove(estimate_file)
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; the testing environment is
            # not expected to use any other value by default.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        if checkpoint == 'remote_libvirt_conn':
            cmd = r"sed -i '/localhost/d' ~/.ssh/known_hosts"
            process.run(cmd, shell=True, ignore_status=True)
            if locals().get('public_key'):
                key = public_key.rstrip().split()[1].split('/')[0]
                cmd = r"sed -i '/%s/d' ~/.ssh/authorized_keys" % key
                process.run(cmd, shell=True, ignore_status=True)
Example 12
    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Convert to rhv will remove all removable devices(floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Selinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))
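
check_result() relies on utils_v2v.multiple_versions_compare() with interval strings such as '[virt-v2v-1.42,)'. The snippet below is not the utils_v2v implementation; it is only a minimal sketch of how such a half-open interval could be evaluated for a bare version number (the real helper also understands package-name prefixes and RPM release fields):

import re

def version_tuple(ver):
    # '1.42.3' -> (1, 42, 3); anything non-numeric is ignored in this sketch
    return tuple(int(p) for p in re.findall(r'\d+', ver))

def in_interval(installed, interval):
    # interval like '[1.42,)': inclusive lower bound, open/absent upper bound
    lower, upper = interval.strip('[]()').split(',')
    ok = not lower or version_tuple(installed) >= version_tuple(lower)
    if upper:
        ok = ok and version_tuple(installed) < version_tuple(upper)
    return ok

print(in_interval('1.45.0', '[1.42,)'))   # True
print(in_interval('1.40.2', '[1.42,)'))   # False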
Example no. 13
0
    def check_windows_vm(self):
        """
        Check windows guest after v2v convert.
        """
        try:
            # Sometimes windows guests need >10mins to finish driver
            # installation
            self.checker.create_session(timeout=900)
        except Exception as detail:
            raise exceptions.TestError(
                'Failed to connect to windows guest: %s' % detail)
        logging.info("Wait 60 seconds for installing drivers")
        time.sleep(60)
        # Close and re-create session in case connection reset by peer during
        # sleeping time. Keep trying until the test command runs successfully.
        for retry in range(RETRY_TIMES):
            try:
                self.checker.run_cmd('dir')
            except BaseException:
                self.checker.session.close()
                self.checker.session = None
                self.checker.create_session()
            else:
                break

        # Check boottype of the guest
        self.check_vm_boottype()

        # Check viostor file
        logging.info("Checking windows viostor info")
        output = self.checker.get_viostor_info()
        if not output:
            err_msg = "Not find viostor info"
            self.log_err(err_msg)

        # Check Red Hat VirtIO drivers and display adapter
        logging.info("Checking VirtIO drivers and display adapter")
        expect_drivers = [
            "Red Hat VirtIO SCSI", "Red Hat VirtIO Ethernet Adapte"
        ]
        # see bz1902635
        virtio_win_ver = "[virtio-win-1.9.16-1,)"
        virtio_win_qxl_os = ['win2008r2', 'win7']
        virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']
        virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
            virtio_win_ver)
        if virtio_win_support_qxldod and self.os_version in virtio_win_qxldod_os:
            expect_adapter = 'Red Hat QXL controller'
        elif self.os_version in virtio_win_qxl_os:
            expect_adapter = 'Red Hat QXL GPU'
        else:
            expect_adapter = 'Microsoft Basic Display Driver'

        expect_drivers.append(expect_adapter)
        check_drivers = expect_drivers[:]
        for check_times in range(10):
            logging.info('Check drivers for the %dth time', check_times + 1)
            # Windows VM may reboot after drivers are installed, so a fresh
            # session should be created to avoid using an invalid session.
            self.checker.session.close()
            self.checker.session = None
            self.checker.create_session(timeout=900)
            win_drivers = self.checker.get_driver_info()
            for driver in expect_drivers:
                if driver in win_drivers:
                    logging.info("Driver %s found", driver)
                    check_drivers.remove(driver)
                else:
                    err_msg = "Driver %s not found" % driver
                    logging.error(err_msg)
            expect_drivers = check_drivers[:]
            if not expect_drivers:
                break
            else:
                wait = 60
                logging.info('Wait another %d seconds...', wait)
                time.sleep(wait)
        if expect_drivers:
            for driver in expect_drivers:
                self.log_err("Not find driver: %s" % driver)

        # Check graphic and video type in VM XML
        if compare_version(V2V_7_3_VERSION):
            self.check_vm_xml()
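
The driver check above keeps re-creating the guest session because Windows may reboot while drivers are still being installed. The same retry-with-fresh-session idea, factored into a standalone helper (a sketch only; 'checker' is assumed to expose run_cmd(), create_session() and a closable session, as in the examples above):

import time

def run_with_fresh_session(checker, cmd, retries=10, timeout=900, wait=60):
    """Run cmd on the guest, re-creating the session between attempts."""
    for _ in range(retries):
        try:
            return checker.run_cmd(cmd)
        except Exception:
            # The guest may have rebooted; throw the stale session away
            checker.session.close()
            checker.session = None
            checker.create_session(timeout=timeout)
            time.sleep(wait)
    raise RuntimeError('command "%s" never succeeded' % cmd)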
Example no. 14
0
def run(test, params, env):
    """
    Basic nbdkit tests
    """
    checkpoint = params.get('checkpoint')
    version_requried = params.get('version_requried')

    def test_filter_stats_fd_leak():
        """
        check if nbdkit-stats-filter leaks an fd
        """
        tmp_logfile = os.path.join(data_dir.get_tmp_dir(), "nbdkit-test.log")
        cmd = """
nbdkit -U - --filter=log --filter=stats sh - \
  logfile=/dev/null statsfile=/dev/null \
  --run 'qemu-io -r -f raw -c "r 0 1" $nbd' <<\EOF
  case $1 in
    get_size) echo 1m;;
    pread) ls -l /proc/$$/fd > %s
      dd iflag=count_bytes count=$3 if=/dev/zero || exit 1 ;;
    *) exit 2 ;;
  esac
EOF
""" % tmp_logfile

        process.run(cmd, shell=True)

        count = 0
        with open(tmp_logfile) as fd:
            ptn = r'(\d+)\s+->\s+\'(pipe|socket):'
            cont = fd.read()
            logging.debug('all fds:\n%s', cont)
            for i, _ in re.findall(ptn, cont):
                if int(i) > 2:
                    count += 1
        if count > 0:
            test.fail('nbdkit-stats-filter leaks %d fd' % count)

    def test_has_run_againt_vddk7_0():
        """
        check if nbdkit --run + vddk + esx7.0 works.
        """
        from virttest.utils_pyvmomi import VSphereConnection, vim

        vm_name = params_get(params, "main_vm")
        if not vm_name:
            test.error('No VM specified')
        # vsphere server's host name or IP address
        vsphere_host = params_get(params, "vsphere_host")
        vsphere_user = params_get(params, "vsphere_user", 'root')
        # vsphere password
        vsphere_pwd = params_get(params, "vsphere_pwd")
        vsphere_passwd_file = params_get(
            params, "vpx_passwd_file", '/tmp/v2v_vpx_passwd')
        with open(vsphere_passwd_file, 'w') as fd:
            fd.write(vsphere_pwd)

        # get vm and file's value
        connect_args = {
            'host': vsphere_host,
            'user': vsphere_user,
            'pwd': vsphere_pwd}
        with VSphereConnection(**connect_args) as conn:
            conn.target_vm = vm_name
            nbdkit_vm_name = 'moref=' + \
                str(conn.target_vm).strip('\'').split(':')[1]
            nbdkit_file = conn.get_hardware_devices(
                dev_type=vim.vm.device.VirtualDisk)[0].backing.fileName

        # vddk_libdir
        vddk_libdir_src = params_get(params, "vddk_libdir_src")
        with tempfile.TemporaryDirectory(prefix='vddklib_') as vddk_libdir:
            utils_misc.mount(vddk_libdir_src, vddk_libdir, 'nfs')
            vddk_thumbprint = '11'
            nbdkit_cmd = """
nbdkit -rfv -U - --exportname / \
  --filter=cacheextents --filter=retry vddk server=%s user=%s password=+%s vm=%s \
  file='%s' libdir=%s --run 'qemu-img info $nbd' thumbprint=%s
""" % (vsphere_host, vsphere_user, vsphere_passwd_file, nbdkit_vm_name, nbdkit_file, vddk_libdir, vddk_thumbprint)
            # get thumbprint by a trick
            cmd_result = process.run(
                nbdkit_cmd, shell=True, ignore_status=True)
            output = cmd_result.stdout_text + cmd_result.stderr_text
            vddk_thumbprint = re.search(
                r'PeerThumbprint:\s+(.*)', output).group(1)

            # replace thumbprint with correct value
            nbdkit_cmd = nbdkit_cmd.strip()[:-2] + vddk_thumbprint
            logging.info('nbdkit command:\n%s', nbdkit_cmd)

            # Run the final nbdkit command
            output = process.run(nbdkit_cmd, shell=True).stdout_text
            utils_misc.umount(vddk_libdir_src, vddk_libdir, 'nfs')
            if not re.search(r'virtual size', output):
                test.fail('failed to test has_run_againt_vddk7_0')

    if version_requried and not multiple_versions_compare(
            version_requried):
        test.cancel("Testing requries version: %s" % version_requried)

    if checkpoint == 'filter_stats_fd_leak':
        test_filter_stats_fd_leak()
    elif checkpoint == 'has_run_againt_vddk7_0':
        test_has_run_againt_vddk7_0()
    else:
        test.error('Not found testcase: %s' % checkpoint)
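
The fd-leak check above counts pipe/socket descriptors above fd 2 in the 'ls -l /proc/$$/fd' listing produced by the sh plugin. A standalone run of the same regex on a made-up listing:

import re

sample = """\
lrwx------. 1 root root 64 Jan  1 00:00 0 -> 'pipe:[12345]'
lrwx------. 1 root root 64 Jan  1 00:00 1 -> 'socket:[12346]'
lrwx------. 1 root root 64 Jan  1 00:00 5 -> 'pipe:[12347]'
"""
ptn = r'(\d+)\s+->\s+\'(pipe|socket):'
leaked = [fd for fd, _ in re.findall(ptn, sample) if int(fd) > 2]
print(leaked)  # ['5'] -- anything beyond stdin/stdout/stderr counts as a leak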
Example no. 15
0
def run(test, params, env):
    """
    Basic nbdkit tests
    """
    checkpoint = params.get('checkpoint')
    version_required = params.get('version_required')

    def test_filter_stats_fd_leak():
        """
        check if nbdkit-stats-filter leaks an fd
        """
        tmp_logfile = os.path.join(data_dir.get_tmp_dir(), "nbdkit-test.log")
        cmd = """
nbdkit -U - --filter=log --filter=stats sh - \
  logfile=/dev/null statsfile=/dev/null \
  --run 'qemu-io -r -f raw -c "r 0 1" $nbd' <<\EOF
  case $1 in
    get_size) echo 1m;;
    pread) ls -l /proc/$$/fd > %s
      dd iflag=count_bytes count=$3 if=/dev/zero || exit 1 ;;
    *) exit 2 ;;
  esac
EOF
""" % tmp_logfile

        process.run(cmd, shell=True)

        count = 0
        with open(tmp_logfile) as fd:
            ptn = r'(\d+)\s+->\s+\'(pipe|socket):'
            cont = fd.read()
            LOG.debug('all fds:\n%s', cont)
            for i, _ in re.findall(ptn, cont):
                if int(i) > 2:
                    count += 1
        if count > 0:
            test.fail('nbdkit-stats-filter leaks %d fd' % count)

    def test_has_run_againt_vddk7_0():
        """
        check if nbdkit --run + vddk + esx7.0 works.
        """
        from virttest.utils_pyvmomi import VSphereConnection, vim

        vm_name = params_get(params, "main_vm")
        if not vm_name:
            test.error('No VM specified')
        # vsphere server's host name or IP address
        vsphere_host = params_get(params, "vsphere_host")
        vsphere_user = params_get(params, "vsphere_user", 'root')
        # vsphere password
        vsphere_pwd = params_get(params, "vsphere_pwd")
        vsphere_passwd_file = params_get(
            params, "vpx_passwd_file", '/tmp/v2v_vpx_passwd')
        with open(vsphere_passwd_file, 'w') as fd:
            fd.write(vsphere_pwd)

        # get vm and file's value
        connect_args = {
            'host': vsphere_host,
            'user': vsphere_user,
            'pwd': vsphere_pwd}
        with VSphereConnection(**connect_args) as conn:
            conn.target_vm = vm_name
            nbdkit_vm_name = 'moref=' + \
                str(conn.target_vm).strip('\'').split(':')[1]
            nbdkit_file = conn.get_hardware_devices(
                dev_type=vim.vm.device.VirtualDisk)[0].backing.fileName

        # vddk_libdir
        vddk_libdir_src = params_get(params, "vddk_libdir_src")
        with tempfile.TemporaryDirectory(prefix='vddklib_') as vddk_libdir:
            utils_misc.mount(vddk_libdir_src, vddk_libdir, 'nfs')
            vddk_thumbprint = '11'
            nbdkit_cmd = """
nbdkit -rfv -U - --exportname / \
  --filter=cacheextents --filter=retry vddk server=%s user=%s password=+%s vm=%s \
  file='%s' libdir=%s --run 'qemu-img info $nbd' thumbprint=%s
""" % (vsphere_host, vsphere_user, vsphere_passwd_file, nbdkit_vm_name, nbdkit_file, vddk_libdir, vddk_thumbprint)
            # get thumbprint by a trick
            cmd_result = process.run(
                nbdkit_cmd, shell=True, ignore_status=True)
            output = cmd_result.stdout_text + cmd_result.stderr_text
            vddk_thumbprint = re.search(
                r'PeerThumbprint:\s+(.*)', output).group(1)

            # replace thumbprint with correct value
            nbdkit_cmd = nbdkit_cmd.strip()[:-2] + vddk_thumbprint
            LOG.info('nbdkit command:\n%s', nbdkit_cmd)

            if checkpoint == 'vddk_stats':
                vddk_stats = params_get(params, "vddk_stats")
                nbdkit_cmd = nbdkit_cmd + ' -D vddk.stats=%s' % vddk_stats
                LOG.info('nbdkit command with -D option:\n%s', nbdkit_cmd)

            # Run the final nbdkit command
            output = process.run(nbdkit_cmd, shell=True).stdout_text
            utils_misc.umount(vddk_libdir_src, vddk_libdir, 'nfs')
            if checkpoint == 'vddk_stats':
                if vddk_stats == 1 and not re.search(
                        r'VDDK function stats', output):
                    test.fail('failed to test vddk_stats')
                if vddk_stats == 0 and re.search(
                        r'VDDK function stats', output):
                    test.fail('failed to test vddk_stats')
            if checkpoint == 'has_run_againt_vddk7_0' and not re.search(
                    r'virtual size', output):
                test.fail('failed to test has_run_againt_vddk7_0')

    def test_memory_max_disk_size():
        """
        check case for bz1913740
        """
        if multiple_versions_compare('[qemu-kvm-5.2.0-3,)'):
            mem_size = "2**63 - 2**30"
            invalid_mem_size = "2**63 - 2**30 + 1"
        else:
            mem_size = "2**63 - 512"
            invalid_mem_size = "2**63 - 512 + 1"

        cmd = 'nbdkit memory $((%s)) --run \'qemu-img info "$uri"\'' % mem_size
        cmd_result = process.run(cmd, shell=True, ignore_status=True)
        if cmd_result.exit_status != 0:
            test.fail('failed to test memory_max_disk_size')

        cmd = 'nbdkit memory $((%s)) --run \'qemu-img info "$uri"\'' % invalid_mem_size
        expected_msg = 'File too large'
        cmd_result = process.run(cmd, shell=True, ignore_status=True)
        if cmd_result.exit_status == 0 or expected_msg in cmd_result.stdout_text:
            test.fail('failed to test memory_max_disk_size')

    def test_data_corruption():
        """
        check case for bz1990134
        """
        cmd = """nbdkit --filter=cow data "33 * 100000" --run 'nbdsh -u $uri -c "h.trim(100000, 0)" ; nbdcopy $uri - | hexdump -C'"""
        cmd_result = process.run(cmd, shell=True, ignore_status=True)
        if cmd_result.exit_status != 0 or '21 21' in cmd_result.stdout_text:
            test.fail('failed to test data_corruption')

    def test_cve_2019_14850():
        """
        check case for bz1757263
        """
        cred_dict = {
            'cakey': 'ca-key.pem',
            'cacert': 'ca-cert.pem',
            'serverkey': 'server-key.pem',
            'servercert': 'server-cert.pem'}
        tls_dir = data_dir.get_tmp_dir()
        build_CA(tls_dir, credential_dict=cred_dict)
        build_server_key(
            tls_dir,
            credential_dict=cred_dict,
            server_ip='127.0.0.0')
        cmd1 = """echo | nbdkit -fv --tls=require --tls-certificates=/root null --run "nc localhost 10809" --tls-verify-peer"""
        cmd2 = "nbdkit -fv null --run 'sleep 1 >/dev/tcp/localhost/10809' 2>&1"
        for cmd in [cmd1, cmd2]:
            cmd_result = process.run(cmd, shell=True, ignore_status=True)
            if 'open readonly' in cmd_result.stdout_text:
                test.fail('failed to test cve_2019_14850')

    if version_required and not multiple_versions_compare(
            version_required):
        test.cancel("Testing requires version: %s" % version_required)

    if checkpoint == 'filter_stats_fd_leak':
        test_filter_stats_fd_leak()
    elif checkpoint in ['has_run_againt_vddk7_0', 'vddk_stats']:
        test_has_run_againt_vddk7_0()
    elif checkpoint == 'memory_max_disk_size':
        test_memory_max_disk_size()
    elif checkpoint == 'data_corruption':
        test_data_corruption()
    elif checkpoint == 'cve_2019_14850':
        test_cve_2019_14850()
    else:
        test.error('Not found testcase: %s' % checkpoint)
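
For reference, the two size limits used in test_memory_max_disk_size() expand to the following byte values (the limits themselves come from the test above; only the arithmetic is shown here):

new_limit = 2**63 - 2**30   # 9223372035781033984 bytes, qemu-kvm >= 5.2.0-3
old_limit = 2**63 - 512     # 9223372036854775296 bytes, older qemu-kvm
print(new_limit, old_limit)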
Example no. 16
0
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the process cannot be controlled. To avoid
                # breaking vmchecker.run(), it is better to run
                # check_windows_ogac first, because it waits until the
                # reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)"
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                check_windows_signature(vmchecker.checker, r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint:
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
                virsh_session.close()
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
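
check_windows_service() is called above but not included in this excerpt. A minimal sketch of what such a check could look like, querying the service state over the existing guest session; the 'sc query' approach and the run_cmd() return convention are assumptions, not the original helper:

import logging

def check_windows_service(checker, service_name):
    # Hypothetical sketch: report an error if the service is not RUNNING.
    # checker.run_cmd() is assumed to return (status, output), as in the
    # surrounding examples.
    status, output = checker.run_cmd('sc query %s' % service_name)
    if status != 0 or 'RUNNING' not in output:
        logging.error('Service %s is not running', service_name)
        return False
    return True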
Example no. 17
0
    def check_windows_vm(self):
        """
        Check windows guest after v2v convert.
        """
        try:
            # Sometimes windows guests need >10mins to finish driver
            # installation
            self.checker.create_session(timeout=900)
        except Exception as detail:
            raise exceptions.TestError(
                'Failed to connect to windows guest: %s' %
                detail)
        logging.info("Wait 60 seconds for installing drivers")
        time.sleep(60)
        # Close and re-create session in case connection reset by peer during
        # sleeping time. Keep trying until the test command runs successfully.
        for retry in range(RETRY_TIMES):
            try:
                self.checker.run_cmd('dir')
            except BaseException:
                self.checker.session.close()
                self.checker.session = None
                self.checker.create_session()
            else:
                break

        # Check boottype of the guest
        self.check_vm_boottype()

        # Check viostor file
        logging.info("Checking windows viostor info")
        output = self.checker.get_viostor_info()
        if not output:
            err_msg = "Not find viostor info"
            self.log_err(err_msg)

        # Check Red Hat VirtIO drivers and display adapter
        logging.info("Checking VirtIO drivers and display adapter")
        expect_drivers = ["Red Hat VirtIO SCSI",
                          "Red Hat VirtIO Ethernet Adapte"]
        # see bz1902635
        virtio_win_ver = "[virtio-win-1.9.16,)"
        virtio_win_qxl_os = ['win2008r2', 'win7']
        virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']
        virtio_win_installed = os.path.exists(
            '/usr/share/virtio-win/virtio-win.iso')
        # virtio-win is not installed, but VIRTIO_WIN is set
        virtio_win_env = os.getenv('VIRTIO_WIN')

        expect_adapter = 'Microsoft Basic Display Driver'
        if not virtio_win_installed:
            if virtio_win_env:
                if os.path.isdir(virtio_win_env):
                    virtio_win_iso_dir = virtio_win_env
                    qxldods = glob.glob(
                        "%s/**/qxldod.inf" %
                        virtio_win_iso_dir, recursive=True)
                else:
                    with tempfile.TemporaryDirectory(prefix='v2v_helper_') as virtio_win_iso_dir:
                        process.run(
                            'mount %s %s' %
                            (virtio_win_env, virtio_win_iso_dir), shell=True)
                        qxldods = glob.glob(
                            "%s/**/qxldod.inf" %
                            virtio_win_iso_dir, recursive=True)
                        process.run(
                            'umount %s' %
                            (virtio_win_iso_dir),
                            shell=True)
                logging.debug('Found qxldods: %s', qxldods)
                if qxldods:
                    virtio_win_support_qxldod = True
                    virtio_win_installed = True
        else:
            virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
                virtio_win_ver)

        if virtio_win_installed:
            if virtio_win_support_qxldod and self.os_version in virtio_win_qxldod_os:
                expect_adapter = 'Red Hat QXL controller'
            elif self.os_version in virtio_win_qxl_os:
                expect_adapter = 'Red Hat QXL GPU'

        expect_drivers.append(expect_adapter)
        check_drivers = expect_drivers[:]
        for check_times in range(10):
            logging.info('Check drivers for the %dth time', check_times + 1)
            # Windows VM may reboot after drivers are installed, so a fresh
            # session should be created to avoid using an invalid session.
            self.checker.session.close()
            self.checker.session = None
            self.checker.create_session(timeout=900)
            win_drivers = self.checker.get_driver_info()
            for driver in expect_drivers:
                if driver in win_drivers:
                    logging.info("Driver %s found", driver)
                    check_drivers.remove(driver)
                else:
                    err_msg = "Driver %s not found" % driver
                    logging.error(err_msg)
            expect_drivers = check_drivers[:]
            if not expect_drivers:
                break
            else:
                wait = 60
                logging.info('Wait another %d seconds...', wait)
                time.sleep(wait)
        if expect_drivers:
            for driver in expect_drivers:
                self.log_err("Not find driver: %s" % driver)

        # Check graphic and video type in VM XML
        if compare_version(V2V_7_3_VERSION):
            self.check_vm_xml()
Example no. 18
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    version_required = params.get("version_required")
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = '-v -x' if params.get('v2v_debug',
                                     'on') in ['on', 'force_on'] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_vm_check = params.get('skip_vm_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom',
                   'sata_disk', 'network_rtl8139', 'network_e1000',
                   'spice', 'spice_encrypt', 'spice_qxl',
                   'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
                   'listen_none', 'listen_socket', 'only_net', 'only_br']
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    if version_required and not utils_v2v.multiple_versions_compare(
            version_required):
        test.cancel("Testing requires version: %s" % version_required)

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri,
                       'remote_ip': source_ip,
                       'remote_user': source_user,
                       'remote_pwd': source_pwd,
                       'auto_close': True,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """

        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(
            r'(\* kernel.*?\/boot\/config){1,}',
            v2v_output,
            re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(
                r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i)
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should be
            # same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions does not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines()
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
            log_fail('Check boot kernel failed')
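    # Illustrative aside, not part of the original test: str.lstrip() removes
    # a *set of characters*, not a prefix, so lstrip('kernel-') above works
    # only because the version starts with a digit outside that set.  An
    # exact-prefix strip would be:
    _name = 'kernel-4.18.0-425.el8.x86_64'
    assert _name[len('kernel-'):] == '4.18.0-425.el8.x86_64'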

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
                'type': type, 'targetbus': bus[type]}
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set(
                'dev',
                dev_table[dest] +
                'd' +
                string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node

        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not convert to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {'root': 'e2label %s ROOT',
                   'swap': 'swaplabel -L SWAPPER %s'}
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('%s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM',
                'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file to make left space of root less than $left_space MB
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                     (large_file, avail - left_space + 2)
        session.cmd(cmd_create, timeout=v2v_timeout)
        newAvail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New Available space: %sM' % newAvail)
        return large_file
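    # Illustrative aside, not part of the original test: the dd count above
    # leaves a little less than $left_space MB free.  With made-up numbers:
    _avail, _left_space = 1000, 800
    _count = _avail - _left_space + 2   # 202 MB written by dd
    assert _avail - _count == 798       # below the 800 MB target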

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        output = session.cmd_output('yum update --assumeno')
        LOG.debug(output)
        if 'rpmdb open failed' not in output:
            test.error('Prepare corrupt rpmdb failed')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux stat of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            LOG.info('Set selinux stat with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[
                0,
                3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(
            r'Active:\s\w*\s\(\w*\)',
            firewalld_status).group()
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:', ok_status=[
                0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(
            r'Active:\s\w*\s\(\w*\)',
            firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')
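    # Illustrative aside, not part of the original test: the regex above keeps
    # only the service state and drops the timestamp, which changes whenever
    # the guest restarts.
    _line = ('Active: active (running) since Fri 2019-03-15 01:03:39 CST; '
             '3min 48s ago')
    assert re.search(r'Active:\s\w*\s\(\w*\)', _line).group() == \
        'Active: active (running)'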

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2mins to sync clock,
            # We set timeout to 300s will not have side-effects for other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Convert to rhv will remove all removable devices(floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'],
                                     vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' % (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(
                    r'^SELINUXTYPE=\s*(\S+)$', expect_output, re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$',
                    actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Selinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create different sasl_user name for different job
            params.update({'sasl_user': params.get("sasl_user") +
                           utils_misc.generate_random_string(3)})
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save origin graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        backup_xml = None
        # Only the kvm guest's xml needs to be backed up currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
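            # pc-q35-rhel8+ machine types have no isa-fdc controller, so the
            # floppy checkpoints cannot run on them and are cancelled below.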
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.', params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
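            # Record the MAC addresses of the source guest's bridge/network
            # interfaces so they can be checked after conversion.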
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {'type': 'spice',
                                'passwd': params.get('spice_passwd', 'redhat')}
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {'type': 'spice',
                                      'passwdValidTo': '1970-01-01T00:00:01'}
            else:
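                # checkpoint is '<graphic>' or '<graphic>_<video>',
                # e.g. 'vnc' or 'vnc_cirrus'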
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find(
                        'devices').find('video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        for attr_i in ['ram', 'vgamem']:
                            if attr_i in video.attrib:
                                video.attrib.pop(attr_i)
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find(
                'devices').find('graphics').find('listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Set service chronyd on')
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server,
                   'chronyc waitsync']
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(
                vm_name, 'network default --current', debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attach bridge')
            virsh.attach_interface(
                vm_name, 'bridge virbr0 --current', debug=True)
        if checkpoint == 'no_libguestfs_backend':
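            # Drop LIBGUESTFS_BACKEND so libguestfs falls back to its default
            # backend selection.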
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not with file image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Esempio n. 19
0
def run(test, params, env):
    """
    Convert specific esx guest
    """
    V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)"

    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_cp = params.get("enable_legacy_crypto_policies",
                                  'no') == 'yes'
    version_requried = params.get("version_requried")
    unprivileged_user = params_get(params, 'unprivileged_user')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get("v2v_opts"):
        # Force a leading space before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '').split(',')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False
    cmd_has_ip = 'yes' == params.get('cmd_has_ip', 'yes')
    interaction_run = 'yes' == params.get('interaction_run', 'no')

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if "service" not in check:
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            removed_pkgs = removed_pkgs.strip().split(',')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        else:
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check the content of grub files meet expectation.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        # rhev-apt.exe is removed on rhel9
        if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER):
            file_path.pop('rhev-apt.exe')
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_signature(vmcheck, full_name):
        """
        Check signature of a file in windows VM

        :param vmcheck: VMCheck object for vm checking
        :param full_name: a file's full path name
        """
        logging.info(
            'powershell or signtool needs to be installed in guest first')

        cmds = [('powershell "Get-AuthenticodeSignature %s | format-list"' %
                 full_name, r'SignerCertificate.*?Not After](.*?)\[Thumbprint',
                 '%m/%d/%Y %I:%M:%S %p'),
                ('signtool verify /v %s' % full_name,
                 r'Issued to: Red Hat.*?Expires:(.*?)SHA1 hash', '')]
        for cmd, ptn, fmt in cmds:
            _, output = vmcheck.run_cmd(cmd)
            if re.search(ptn, output, re.S):
                expire_time = re.search(ptn, output, re.S).group(1).strip()
                if fmt:
                    expire_time = time.strptime(expire_time, fmt)
                else:
                    expire_time = time.strptime(expire_time)
                if time.time() > time.mktime(expire_time):
                    test.fail("Signature of '%s' has expired" % full_name)
                return
        # Reaching here means the guest has neither powershell nor signtool
        test.error("Powershell or Signtool must be installed in guest")

    def check_windows_vmware_tools(vmcheck):
        """
        Check vmware tools is uninstalled in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _get_vmware_info(cmd):
            _, res = vmcheck.run_cmd(cmd)
            if res and not re.search('vmtools', res, re.I):
                return True
            return False

        cmds = ['tasklist', 'sc query vmtools']
        for cmd in cmds:
            res = utils_misc.wait_for(lambda: _get_vmware_info(cmd),
                                      600,
                                      step=30)
            if not res:
                test.fail("Failed to verification vmtools uninstallation")

    def check_windows_service(vmcheck, service_name):
        """
        Check service in VM

        :param vmcheck: VMCheck object for vm checking
        :param service_name: a service's name
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)
        except (ShellProcessTerminatedError, ShellStatusError):
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            # VM boot-up is extremely slow when all tests are running on the
            # rhv server simultaneously, so set the timeout to 1200.
            vmcheck.create_session(timeout=1200)
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)

        if not res:
            test.fail('Not found running %s service' % service_name)

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

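        # VIRTIO_WIN may point to an ISO file or to an already extracted
        # directory; mount it first when a plain file is given.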
        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('Available qemu-guest-agent packages: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # Check the service status of qemu-guest-agent in VM
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent;systemctl status qemu-ga*'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if the ubuntu-server package exists')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def find_net(bridge_name):
        """
        Find which network use specified bridge

        :param bridge_name: bridge name you want to find
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_static_ip_conf(vmcheck):
        """
        Check static IP configuration in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _static_ip_check():
            cmd = 'ipconfig /all'
            _, output = vmcheck.run_cmd(cmd, debug=False)
            v2v_cmd = params_get(params, 'v2v_command')
            # --mac 00:50:56:ac:7a:4d:ip:192.168.1.2,192.168.1.1,22,192.168.1.100,10.73.2.108,10.66.127.10
            mac_ip_pattern = '--mac (([0-9a-zA-Z]{2}:){6})ip:([0-9,.]+)'
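            # group(1) is the MAC address (with a trailing ':'); group(3) is
            # the 'IP,GATEWAY,PREFIX,DNS,...' list parsed below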
            ip_config_list = re.search(mac_ip_pattern, v2v_cmd).group(3)
            mac_addr = re.search(mac_ip_pattern,
                                 v2v_cmd).group(1)[0:-1].upper().replace(
                                     ':', '-')
            eth_adapter_ptn = r'Ethernet adapter Ethernet.*?NetBIOS over Tcpip'

            try:
                ipconfig = [
                    v for v in re.findall(eth_adapter_ptn, output, re.S)
                    if mac_addr in v
                ][0]
            except IndexError:
                return False

            for i, value in enumerate(ip_config_list.split(',')):
                if not value:
                    continue
                # IP address
                if i == 0:
                    ip_addr = r'IPv4 Address.*?: %s' % value
                    if not re.search(ip_addr, ipconfig, re.S):
                        logging.debug('Found IP addr failed')
                        return False
                # Default gateway
                if i == 1:
                    ip_gw = r'Default Gateway.*?: .*?%s' % value
                    if not re.search(ip_gw, ipconfig, re.S):
                        logging.debug('Found Gateway failed')
                        return False
                # Subnet mask
                if i == 2:
                    # convert the cidr prefix length to a dotted subnet mask
                    bin_mask = '1' * int(value) + '0' * (32 - int(value))
                    cidr = '.'.join([
                        str(int(bin_mask[i * 8:i * 8 + 8], 2))
                        for i in range(4)
                    ])
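                    # e.g. a /22 prefix yields 255.255.252.0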
                    sub_mask = r'Subnet Mask.*?: %s' % cidr
                    if not re.search(sub_mask, ipconfig, re.S):
                        logging.debug('Found subnet mask failed')
                        return False
                # DNS server list
                if i >= 3:
                    dns_server = r'DNS Servers.*?:.*?%s' % value
                    if not re.search(dns_server, ipconfig, re.S):
                        logging.debug('Found DNS Server failed')
                        return False
            return True

        try:
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 1800, step=300)
        except (ShellTimeoutError, ShellProcessTerminatedError):
            logging.debug(
                'Lost connection to windows guest, the static IP may have taken effect'
            )
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 300, step=30)
        vmcheck.run_cmd('ipconfig /all')  # debug msg
        if not res:
            test.fail('Checking static IP configuration failed')

    def check_rhsrvany_checksums(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        def _get_expected_checksums(tool_exec, file):
            val = process.run('%s %s' % (tool_exec, file),
                              shell=True).stdout_text.split()[0]

            if not val:
                test.error('Get checksum failed')
            logging.info('%s: Expect %s: %s', file, tool_exec, val)
            return val

        def _get_real_checksums(algorithm, file):
            certutil_cmd = r'certutil -hashfile "%s"' % file
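            # certutil -hashfile defaults to SHA1, so an explicit algorithm
            # argument is only appended for the md5 check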
            if algorithm == 'md5':
                certutil_cmd += ' MD5'

            res = vmcheck.session.cmd_output(certutil_cmd, safe=True)
            logging.debug('%s output:\n%s', certutil_cmd, res)

            val = res.strip().splitlines()[1].strip()
            logging.info('%s: Real %s: %s', file, algorithm, val)
            return val

        logging.info('Check md5 and sha1 of rhsrvany.exe')

        algorithms = {'md5': 'md5sum', 'sha1': 'sha1sum'}

        rhsrvany_path = r'/usr/share/virt-tools/rhsrvany.exe'
        rhsrvany_path_windows = r"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"

        for key, val in algorithms.items():
            expect_val = _get_expected_checksums(val, rhsrvany_path)
            real_val = _get_real_checksums(key, rhsrvany_path_windows)
            if expect_val == real_val:
                logging.info('%s are correct', key)
            else:
                test.fail('%s of rhsrvany.exe is not correct' % key)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and that reboot cannot be controlled, so run the
                # ogac checks before vmchecker.run(): they wait until the
                # reboot completes, which keeps vmchecker.run() from being
                # interrupted.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio driver errors do not meet expectation')

        utils_v2v.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Check log size decrease option
        if 'log decrease' in checkpoint:
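            # The log-decrease mode is expected to pass nbdkit.backend.datapath=0,
            # which suppresses nbdkit's per-request data-path debug output and
            # keeps the log small.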
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        if 'fstrim_warning' in checkpoint:
            # The fstrim warning is unrelated to v2v itself; it may come from
            # the kernel and is harmless to the conversion.
            V2V_FSTRIM_SUCCESS_VER = "[virt-v2v-1.45.1-1.el9,)"
            if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCCESS_VER):
                params.update({'expect_msg': None})
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requires version: %s" % version_requried)

        # See man virt-v2v-input-xen(1)
        if enable_legacy_cp:
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'cmd_has_ip': cmd_has_ip,
            'params': params
        }

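        # Use the 'direct' backend so libguestfs runs qemu directly rather
        # than going through libvirt.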
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
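        # '-ip FILE' makes virt-v2v read the source hypervisor password from
        # FILE instead of prompting for it interactively.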
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if 'root' in checkpoint and 'ask' in checkpoint:
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if 'root' in checkpoint and 'ask' not in checkpoint:
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if 'with_proxy' in checkpoint:
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if 'ogac' in checkpoint:
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or'
                        ' rhv-guest-tools-iso, preparing it manually. This is'
                        ' a temporary step that should be removed once the'
                        ' official build includes the package.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if 'virtio_iso_blk' in checkpoint:
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if 'block_dev' in checkpoint:
            os_directory = params_get(params, 'os_directory')
            block_count = params_get(params, 'block_count')
            os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_',
                                                       dir=os_directory)
            diskimage = '%s/diskimage' % os_directory.name
            # Update 'os_directory' for '-os' option
            params['os_directory'] = os_directory.name

            # Create a disk image of block_count * 10M (1G when block_count is 100)
            cmd = 'dd if=/dev/zero of=%s bs=10M count=%s' % (diskimage,
                                                             block_count)
            process.run(cmd, shell=True)
            # Build filesystem
            cmd = 'mkfs.ext4 %s' % diskimage
            process.run(cmd, shell=True)
            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup the image as a block device
            cmd = 'losetup %s %s' % (free_loop_dev, diskimage)
            process.run(cmd, shell=True)
            # Create a soft link to the loop device
            blk_dev_link = '%s/mydisk1' % os_directory.name
            cmd = 'ln -s %s %s' % (free_loop_dev, blk_dev_link)
            process.run(cmd, shell=True)

        if 'invalid_pem' in checkpoint:
            # simply change the 2nd line to lowercase to get an invalid pem
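            # read the first two lines, remembering where the second one
            # starts, then seek back and overwrite it in place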
            with open(local_ca_file_path, 'r+') as fd:
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if 'without_default_net' in checkpoint:
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if 'bandwidth' in checkpoint:
            dynamic_speeds = params_get(params, 'dynamic_speeds')
            bandwidth_file = params_get(params, 'bandwidth_file')
            with open(bandwidth_file, 'w') as fd:
                fd.write(dynamic_speeds)

        if checkpoint[0].startswith('virtio_win'):
            cp = checkpoint[0]
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package was not removed')
            if cp.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if cp.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if cp.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if cp.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path

        if 'luks_dev_keys' in checkpoint:
            luks_password = params_get(params, 'luks_password', '')
            luks_keys = params_get(params, 'luks_keys', '')
            keys_options = ' '.join(
                list(
                    map(lambda i: '--key %s' % i if i else '',
                        luks_keys.split(';'))))
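            # luks_keys is a ';'-separated list of virt-v2v '--key' selectors,
            # e.g. 'ID:key:PASSPHRASE' or 'ID:file:PATH'; passphrase files for
            # the ':file:' selectors are written below.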

            if 'invalid_pwd_file' not in checkpoint:
                is_file_key = r'--key \S+:file:(\S+)'
                for file_key in re.findall(is_file_key, keys_options):
                    with open(file_key, 'w') as fd:
                        fd.write(luks_password)
            v2v_params['v2v_opts'] += ' ' + keys_options

        if 'empty_cdrom' in checkpoint:
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if 'exist_uuid' in checkpoint:
                auto_clean = False
            if checkpoint[0] in [
                    'mismatched_uuid', 'no_uuid', 'invalid_source',
                    'system_rhv_pem'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only,
                                           interaction_run)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if 'system_rhv_pem' in checkpoint:
            if 'set' in checkpoint:
                global_pem_setup(local_ca_file_path)
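            # With '-oo rhv-cafile' removed below, virt-v2v must fall back to
            # the system CA trust store (populated by global_pem_setup when
            # the 'set' variant of this checkpoint runs).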
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'mismatched_uuid' in checkpoint:
            # append an extra disk uuid to create a mismatch
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if 'no_uuid' in checkpoint:
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'exist_uuid' in checkpoint:
            # Create the VMChecker here for cleanup because it will not be created in check_result
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            # Update name to avoid conflict
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            new_cmd += ' --no-copy'
            logging.debug('re-run v2v command:\n%s', new_cmd)
        if 'invalid_source' in checkpoint:
            if params.get('invalid_vpx_hostname'):
                new_cmd = v2v_result.replace(
                    vpx_hostname, params.get('invalid_vpx_hostname'))
            if params.get('invalid_esx_hostname'):
                new_cmd = v2v_result.replace(
                    esxi_host, params.get('invalid_esx_hostname'))

        if checkpoint[0] in [
                'mismatched_uuid', 'no_uuid', 'invalid_source', 'exist_uuid',
                'system_rhv_pem'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if enable_legacy_cp:
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
        if checkpoint[0].startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
        if 'virtio_win_iso_mount' in checkpoint:
            process.run('umount /opt', ignore_status=True)
        if 'ogac' in checkpoint and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if 'block_dev' in checkpoint and hasattr(os_directory, 'name'):
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os_directory.cleanup()
        if 'virtio_iso_blk' in checkpoint:
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if 'system_rhv_pem' in checkpoint and 'set' in checkpoint:
            global_pem_cleanup()
        if 'without_default_net' in checkpoint:
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if 'with_proxy' in checkpoint:
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        if unprivileged_user:
            process.system("userdel -fr %s" % unprivileged_user)
        if params.get('os_directory') and os.path.isdir(
                params['os_directory']):
            shutil.rmtree(params['os_directory'], ignore_errors=True)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Esempio n. 20
0
    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                LOG.info('Skip checking vm after conversion: %s' % skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            LOG.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and that reboot cannot be controlled, so run the
                # ogac checks before vmchecker.run(): they wait until the
                # reboot completes, which keeps vmchecker.run() from being
                # interrupted.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                LOG.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                LOG.debug('Expect errors: %s' % expect_errors)
                LOG.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    LOG.error('Virtio drivers not meet expectation')

        utils_v2v.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Check log size decrease option
        if 'log decrease' in checkpoint:
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        if 'fstrim_warning' in checkpoint:
            # fstrim has no real relationship with v2v; the warning may be
            # related to the kernel. It is harmless and does not affect the
            # conversion.
            V2V_FSTRIM_SUCCESS_VER = "[virt-v2v-1.45.1-1.el9,)"
            if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCCESS_VER):
                params.update({'expect_msg': None})
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))
Esempio n. 21
    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    # 76 is the max length in v2v
                    if v2v_start and len(line) > 76:
                        test.fail('Error log longer than 76 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root:
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    vmchecker = VMChecker(test, params, env)
                    params['vmchecker'] = vmchecker
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" succeeded unexpectedly' %
                              (command % win_img))
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Log of saving overlays not found')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Check that every item in example_file exists in the
                    # latest output, regardless of order or newly added items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                if utils_v2v.multiple_versions_compare(
                                        V2V_UNSUPPORT_GLANCE_VER
                                ) and 'glance' in line:
                                    continue
                                test.fail('"%s" not found in the output' %
                                          line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support(
                )
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
            if checkpoint == 'print_estimate_tofile':
                check_print_estimate(estimate_file)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))
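
The 'compress' branch above derives the compression rate purely by splitting
the text that qemu-img check prints; a self-contained sketch of that parsing
on a hypothetical statistics line (the exact figures are made up):

disk_check = ('10/160 = 6.25% allocated, 0.00% fragmented,'
              ' 3.20% compressed clusters')
compress_info = disk_check.split(',')[-1].split('%')[0].strip()
compress_rate = float(compress_info)    # -> 3.2
assert compress_rate >= 0.1             # the image counts as compressed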
Esempio n. 22
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    version_requried = params.get("version_requried")
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get(
        'v2v_debug', 'on') in ['on', 'force_on'] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if the device still exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)
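
    # Illustrative sketch only (never called by the test): shows how the
    # whitespace-insensitive pattern in check_modprobe() above is built from
    # a hypothetical cfg_content value and matched against arbitrary spacing.
    def _modprobe_pattern_sketch():
        import re
        cfg_content = 'alias eth0 virtio_net'   # hypothetical value
        # -> r'alias\s+eth0\s+virtio_net'
        pattern = r'\s+'.join(cfg_content.split())
        return bool(re.search(pattern, 'alias   eth0\tvirtio_net'))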

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of the grub files meets expectations.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is on LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)
        except ShellProcessTerminatedError:
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)

        if not res:
            test.fail('Running qemu-ga service not found')

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vender = vmcheck.get_vm_os_vendor()
            if vender in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''
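
        # Illustrative sketch only (never called): the two version patterns
        # above applied to hypothetical 'dpkg -l' / 'rpm -q' output lines.
        def _pkg_version_pattern_sketch():
            samples = [
                'ii  qemu-guest-agent  1:2.12.0+dfsg-3  amd64  guest agent',
                'qemu-guest-agent-2.12.0-3.el7.x86_64',
            ]
            ptns = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]
            # -> ['2.12.0+dfsg-3', '2.12.0-3.el7']
            return [re.search(p, s).group(1) for s, p in zip(samples, ptns)]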

        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('qemu-guest-agent packages to install: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # If qemu-guest-agent version in VM is higher than the pkg in qemu-guest-agent-iso,
        # v2v will not update the qemu-guest-agent version and report a warning.
        #
        # e.g.
        # virt-v2v: warning: failed to install QEMU Guest Agent: command:         package
        # qemu-guest-agent-10:2.12.0-3.el7.x86_64 (which is newer than
        # qemu-guest-agent-10:2.12.0-2.el7.x86_64) is already installed
        if not any([vm_pkg_ver in pkg for pkg in all_pkgs]):
            logging.debug(
                'qemu-guest-agent version mismatch, maybe the version in the'
                ' VM is higher than the package version in the ISO')
            logging.info(
                'Unexpected qemu-guest-agent version, set v2v log checking')
            expect_msg_ptn = r'virt-v2v: warning: failed to install QEMU Guest Agent.*?is newer than.*? is already installed'
            params.update({'msg_content': expect_msg_ptn, 'expect_msg': 'yes'})

        # Check the service status of qemu-guest-agent in VM
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if the ubuntu-server exist')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def cmd_remove_option(cmd, opt_pattern):
        """
        Remove an option from cmd

        :param cmd: the cmd
        :param opt_pattern: a pattern stands for the option
        """
        for item in re.findall(opt_pattern, cmd):
            cmd = cmd.replace(item, '').strip()
        return cmd
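
    # Illustrative sketch only (never called): hypothetical use of
    # cmd_remove_option() to drop an '-oo rhv-cafile=...' option from a
    # previously built command line, mirroring the system_rhv_pem_* and
    # no_uuid checkpoints below.
    def _cmd_remove_option_sketch():
        cmd = ('virt-v2v -ic vpx://user@vcenter/dc/host -o rhv-upload'
               ' -oo rhv-cafile=/tmp/ca.pem -os nfs_data guest1')
        # -> 'virt-v2v -ic vpx://user@vcenter/dc/host -o rhv-upload'
        #    ' -os nfs_data guest1'
        return cmd_remove_option(cmd, r'-oo rhv-cafile=\S+\s*')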

    def find_net(bridge_name):
        """
        Find which network uses the specified bridge

        :param bridge_name: bridge name you want to find
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if checkpoint == 'ogac':
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the process cannot be controlled. To avoid
                # breaking the vmchecker.run() process, it is better to call
                # check_windows_ogac before vmchecker.run(), because
                # check_windows_ogac waits until the reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    check_windows_ogac(vmchecker.checker)
                else:
                    check_linux_ogac(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
                virsh_session.close()
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'file_architecture':
                check_file_architecture(vmchecker.checker)
            if checkpoint == 'ubuntu_tools':
                check_ubuntools(vmchecker.checker)
            if checkpoint == 'without_default_net':
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Check log size decrease option
        if checkpoint == 'log decrease':
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requries version: %s" % version_requried)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'ogac':
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or'
                        ' rhv-guest-tools-iso, try to prepare it manually.'
                        ' This is not a permanent step; once the official'
                        ' build includes it, this step should be removed.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if checkpoint == 'virtio_iso_blk':
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if checkpoint == 'invalid_pem':
            # simply change the 2nd line to lowercase to get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if checkpoint == 'without_default_net':
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if checkpoint == 'exist_uuid':
                auto_clean = False
            if checkpoint in [
                    'mismatched_uuid', 'no_uuid', 'system_rhv_pem_set',
                    'system_rhv_pem_unset'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if checkpoint.startswith('system_rhv_pem'):
            if checkpoint == 'system_rhv_pem_set':
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'mismatched_uuid':
            # append more uuid
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if checkpoint == 'no_uuid':
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'exist_uuid':
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            logging.debug('re-run v2v command:\n%s', new_cmd)

        if checkpoint in [
                'mismatched_uuid', 'no_uuid', 'exist_uuid',
                'system_rhv_pem_set', 'system_rhv_pem_unset'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if checkpoint == 'ogac' and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'virtio_iso_blk':
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'system_rhv_pem_set':
            global_pem_cleanup()
        if checkpoint == 'without_default_net':
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
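
The invalid_pem checkpoint above corrupts the downloaded CA file by lowercasing
its second line in place; the overwrite works because lower() keeps the line
length unchanged. A self-contained sketch of the same seek-and-rewrite trick on
a hypothetical throwaway file:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.pem', delete=False) as tmp:
    tmp.write('-----BEGIN CERTIFICATE-----\nMIIBsomeBase64Payload\nMORE\n')

with open(tmp.name, 'r+') as fd:
    for _ in range(2):
        pos = fd.tell()          # remember where the current line starts
        res = fd.readline()
    fd.seek(pos)                 # back to the start of the 2nd line
    fd.write(res.lower())        # same length, so nothing else is clobbered
    fd.flush()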
Esempio n. 23
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the process cannot be controlled. To avoid
                # breaking the vmchecker.run() process, it is better to call
                # check_windows_ogac before vmchecker.run(), because
                # check_windows_ogac waits until the reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio drivers not meet expectation')
Esempio n. 24
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    os_pool = storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    skip_vm_check = params.get('skip_vm_check', 'no')
    os_version = params.get('os_version')
    os_type = params.get('os_type')

    # create different sasl_user name for different job
    params.update({'sasl_user': params.get("sasl_user") +
                   utils_misc.generate_random_string(3)})
    logging.info('sasl user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "xen":
        # See man virt-v2v-input-xen(1)
        process.run(
            'update-crypto-policies --set LEGACY',
            verbose=True,
            ignore_status=True,
            shell=True)

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path,
                               local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'auto_close': True,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'os_storage': storage, 'hostname': source_ip,
                  # For virsh connection
                  'password': source_pwd,
                  'new_name': vm_name + utils_misc.generate_random_string(3),
                  'output_method': output_method, 'os_storage_name': storage_name,
                  'input_transport': input_transport, 'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  'params': params
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'of_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Check all checkpoints after convert
        params['vmchecker'] = vmchecker = VMChecker(test, params, env)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            test.error("Import VM failed")

        # When the VM is on OSP, it can't obtain an IP address, so skip
        # the VM checking.
        if skip_vm_check == 'yes':
            logging.debug("skip vm checking")
            return

        ret = vmchecker.run()

        err_list = []
        virtio_win_ver = "[virtio-win-1.9.16-1,)"
        virtio_win_qxl_os = ['win2008r2', 'win7']
        virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']

        if os_type == 'windows':
            virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
                virtio_win_ver)
            qxl_warning = "there is no QXL driver for this version of Windows"
            if virtio_win_support_qxldod and os_version in virtio_win_qxldod_os:
                has_qxl_warning = False
            elif os_version in virtio_win_qxl_os:
                has_qxl_warning = False
            else:
                has_qxl_warning = True

            if has_qxl_warning and not re.search(qxl_warning, v2v_ret.stdout_text):
                err_list.append('QXL warning not found')
            if not has_qxl_warning and re.search(qxl_warning, v2v_ret.stdout_text):
                err_list.append('Unexpected QXL warning')

        ret.extend(err_list)

        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; the testing environment
            # does not set it to any other value by default.
            process.run(
                'update-crypto-policies --set DEFAULT',
                verbose=True,
                ignore_status=True,
                shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
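
The QXL-warning expectation above boils down to a small decision: the
"there is no QXL driver" warning is expected only for a Windows guest whose OS
version is covered neither by the qxldod list (when virtio-win is new enough)
nor by the plain qxl list. A standalone sketch of that decision with
hypothetical inputs:

def expect_qxl_warning(os_version, virtio_win_support_qxldod,
                       qxl_os=('win2008r2', 'win7'),
                       qxldod_os=('win10', 'win2016', 'win2019')):
    # True means the QXL warning should appear in the virt-v2v output.
    if virtio_win_support_qxldod and os_version in qxldod_os:
        return False
    if os_version in qxl_os:
        return False
    return True

assert expect_qxl_warning('win7', True) is False       # hypothetical inputs
assert expect_qxl_warning('win2012r2', True) is True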