Example #1
def run(test, params, env):
    """
    Convert specific esx guest
    """
    V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)"

    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_cp = params.get("enable_legacy_crypto_policies",
                                  'no') == 'yes'
    version_requried = params.get("version_requried")
    unprivileged_user = params_get(params, 'unprivileged_user')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
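    # '-v -x' enables virt-v2v verbose messages and libguestfs debugging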
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get("v2v_opts"):
        # Force a separating space before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '').split(',')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False
    cmd_has_ip = 'yes' == params.get('cmd_has_ip', 'yes')
    interaction_run = 'yes' == params.get('interaction_run', 'no')

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if "service" not in check:
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        else:
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
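        # Build a whitespace-tolerant pattern, e.g. 'alias eth0 virtio_net'
        # becomes r'alias\s+eth0\s+virtio_net'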
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of grub files meets expectation.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        # rhev-apt.exe is removed on rhel9
        if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER):
            file_path.pop('rhev-apt.exe')
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_signature(vmcheck, full_name):
        """
        Check signature of a file in windows VM

        :param vmcheck: VMCheck object for vm checking
        :param full_name: a file's full path name
        """
        logging.info(
            'powershell or signtool needs to be installed in guest first')

        cmds = [('powershell "Get-AuthenticodeSignature %s | format-list"' %
                 full_name, r'SignerCertificate.*?Not After](.*?)\[Thumbprint',
                 '%m/%d/%Y %I:%M:%S %p'),
                ('signtool verify /v %s' % full_name,
                 r'Issued to: Red Hat.*?Expires:(.*?)SHA1 hash', '')]
        for cmd, ptn, fmt in cmds:
            _, output = vmcheck.run_cmd(cmd)
            if re.search(ptn, output, re.S):
                expire_time = re.search(ptn, output, re.S).group(1).strip()
                if fmt:
                    expire_time = time.strptime(expire_time, fmt)
                else:
                    expire_time = time.strptime(expire_time)
                if time.time() > time.mktime(expire_time):
                    test.fail("Signature of '%s' has expired" % full_name)
                return
        # Get here means the guest doesn't have powershell or signtool
        test.error("Powershell or Signtool must be installed in guest")

    def check_windows_vmware_tools(vmcheck):
        """
        Check vmware tools is uninstalled in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _get_vmware_info(cmd):
            _, res = vmcheck.run_cmd(cmd)
            if res and not re.search('vmtools', res, re.I):
                return True
            return False

        cmds = ['tasklist', 'sc query vmtools']
        for cmd in cmds:
            res = utils_misc.wait_for(lambda: _get_vmware_info(cmd),
                                      600,
                                      step=30)
            if not res:
                test.fail("Failed to verification vmtools uninstallation")

    def check_windows_service(vmcheck, service_name):
        """
        Check service in VM

        :param vmcheck: VMCheck object for vm checking
        :param service_name: a service's name
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)
        except (ShellProcessTerminatedError, ShellStatusError):
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            # VM boot is extremely slow when all tests are running on the
            # rhv server simultaneously, so set timeout to 1200.
            vmcheck.create_session(timeout=1200)
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)

        if not res:
            test.fail('Not found running %s service' % service_name)

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('qemu-guest-agent packages to be installed: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # Check the service status of qemu-guest-agent in VM
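        # The pattern matches both systemd ('Active: active (running)') and
        # SysV ('qemu-ga (pid N) is running') status output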
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent;systemctl status qemu-ga*'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if ubuntu-server exists')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def find_net(bridge_name):
        """
        Find which network uses the specified bridge

        :param bridge_name: bridge name you want to find
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_static_ip_conf(vmcheck):
        """
        Check static IP configuration in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _static_ip_check():
            cmd = 'ipconfig /all'
            _, output = vmcheck.run_cmd(cmd, debug=False)
            v2v_cmd = params_get(params, 'v2v_command')
            # --mac 00:50:56:ac:7a:4d:ip:192.168.1.2,192.168.1.1,22,192.168.1.100,10.73.2.108,10.66.127.10'
            mac_ip_pattern = '--mac (([0-9a-zA-Z]{2}:){6})ip:([0-9,.]+)'
            ip_config_list = re.search(mac_ip_pattern, v2v_cmd).group(3)
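            # group(1) is like '00:50:56:ac:7a:4d:'; drop the trailing colon and
            # convert to the dash-separated upper-case form that ipconfig prints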
            mac_addr = re.search(mac_ip_pattern,
                                 v2v_cmd).group(1)[0:-1].upper().replace(
                                     ':', '-')
            eth_adapter_ptn = r'Ethernet adapter Ethernet.*?NetBIOS over Tcpip'

            try:
                ipconfig = [
                    v for v in re.findall(eth_adapter_ptn, output, re.S)
                    if mac_addr in v
                ][0]
            except IndexError:
                return False

            for i, value in enumerate(ip_config_list.split(',')):
                if not value:
                    continue
                # IP address
                if i == 0:
                    ip_addr = r'IPv4 Address.*?: %s' % value
                    if not re.search(ip_addr, ipconfig, re.S):
                        logging.debug('Failed to find IP address')
                        return False
                # Default gateway
                if i == 1:
                    ip_gw = r'Default Gateway.*?: .*?%s' % value
                    if not re.search(ip_gw, ipconfig, re.S):
                        logging.debug('Failed to find default gateway')
                        return False
                # Subnet mask
                if i == 2:
                    # convert subnet mask to cidr
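                    # e.g. a prefix length of '22' yields '255.255.252.0'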
                    bin_mask = '1' * int(value) + '0' * (32 - int(value))
                    cidr = '.'.join([
                        str(int(bin_mask[i * 8:i * 8 + 8], 2))
                        for i in range(4)
                    ])
                    sub_mask = r'Subnet Mask.*?: %s' % cidr
                    if not re.search(sub_mask, ipconfig, re.S):
                        logging.debug('Failed to find subnet mask')
                        return False
                # DNS server list
                if i >= 3:
                    dns_server = r'DNS Servers.*?:.*?%s' % value
                    if not re.search(dns_server, ipconfig, re.S):
                        logging.debug('Failed to find DNS server')
                        return False
            return True

        try:
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 1800, step=300)
        except (ShellTimeoutError, ShellProcessTerminatedError):
            logging.debug(
                'Lost connection to the windows guest; the static IP may be taking effect'
            )
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 300, step=30)
        vmcheck.run_cmd('ipconfig /all')  # debug msg
        if not res:
            test.fail('Checking static IP configuration failed')

    def check_rhsrvany_checksums(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        def _get_expected_checksums(tool_exec, file):
            val = process.run('%s %s' % (tool_exec, file),
                              shell=True).stdout_text.split()[0]

            if not val:
                test.error('Get checksum failed')
            logging.info('%s: Expect %s: %s', file, tool_exec, val)
            return val

        def _get_real_checksums(algorithm, file):
            certutil_cmd = r'certutil -hashfile "%s"' % file
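            # certutil defaults to SHA1, so only MD5 needs to be passed explicitly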
            if algorithm == 'md5':
                certutil_cmd += ' MD5'

            res = vmcheck.session.cmd_output(certutil_cmd, safe=True)
            logging.debug('%s output:\n%s', certutil_cmd, res)

            val = res.strip().splitlines()[1].strip()
            logging.info('%s: Real %s: %s', file, algorithm, val)
            return val

        logging.info('Check md5 and sha1 of rhsrvany.exe')

        algorithms = {'md5': 'md5sum', 'sha1': 'sha1sum'}

        rhsrvany_path = r'/usr/share/virt-tools/rhsrvany.exe'
        rhsrvany_path_windows = r"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"

        for key, val in algorithms.items():
            expect_val = _get_expected_checksums(val, rhsrvany_path)
            real_val = _get_real_checksums(key, rhsrvany_path_windows)
            if expect_val == real_val:
                logging.info('%s are correct', key)
            else:
                test.fail('%s of rhsrvany.exe is not correct' % key)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the reboot cannot be controlled. To avoid
                # breaking vmchecker.run(), run check_windows_service before
                # vmchecker.run(), because it waits until the reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio driver errors do not meet expectation')

        utils_v2v.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Check log size decrease option
        if 'log decrease' in checkpoint:
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
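            # virt-v2v passes this nbdkit debug flag to suppress datapath
            # messages and keep the conversion log small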
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        if 'fstrim_warning' in checkpoint:
            # fstrim is unrelated to v2v; the warning may come from the kernel,
            # is harmless, and does not affect the conversion.
            V2V_FSTRIM_SUCCESS_VER = "[virt-v2v-1.45.1-1.el9,)"
            if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCCESS_VER):
                params.update({'expect_msg': None})
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requires version: %s" % version_requried)

        # See man virt-v2v-input-xen(1)
        if enable_legacy_cp:
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'cmd_has_ip': cmd_has_ip,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
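        # '-ip FILE' makes virt-v2v read the connection password from FILE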
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename a guest whose name contains '#' when converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # Create a distinct sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if 'root' in checkpoint and 'ask' in checkpoint:
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if 'root' in checkpoint and 'ask' not in checkpoint:
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if 'with_proxy' in checkpoint:
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if 'ogac' in checkpoint:
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or rhv-guest-tools-iso;'
                        ' trying to prepare it manually. This is not a permanent step; once'
                        ' the official build includes it, this step should be removed.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if 'virtio_iso_blk' in checkpoint:
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if 'block_dev' in checkpoint:
            os_directory = params_get(params, 'os_directory')
            block_count = params_get(params, 'block_count')
            os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_',
                                                       dir=os_directory)
            diskimage = '%s/diskimage' % os_directory.name
            # Update 'os_directory' for '-os' option
            params['os_directory'] = os_directory.name

            # Create a disk image of size 10M * block_count
            cmd = 'dd if=/dev/zero of=%s bs=10M count=%s' % (diskimage,
                                                             block_count)
            process.run(cmd, shell=True)
            # Build filesystem
            cmd = 'mkfs.ext4 %s' % diskimage
            process.run(cmd, shell=True)
            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup the image as a block device
            cmd = 'losetup %s %s' % (free_loop_dev, diskimage)
            process.run(cmd, shell=True)
            # Create a soft link to the loop device
            blk_dev_link = '%s/mydisk1' % os_directory.name
            cmd = 'ln -s %s %s' % (free_loop_dev, blk_dev_link)
            process.run(cmd, shell=True)

        if 'invalid_pem' in checkpoint:
            # simply change the 2nd line to lowercase to get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if 'without_default_net' in checkpoint:
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if 'bandwidth' in checkpoint:
            dynamic_speeds = params_get(params, 'dynamic_speeds')
            bandwidth_file = params_get(params, 'bandwidth_file')
            with open(bandwidth_file, 'w') as fd:
                fd.write(dynamic_speeds)

        if checkpoint[0].startswith('virtio_win'):
            cp = checkpoint[0]
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package was not removed')
            if cp.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if cp.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if cp.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if cp.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path

        if 'luks_dev_keys' in checkpoint:
            luks_password = params_get(params, 'luks_password', '')
            luks_keys = params_get(params, 'luks_keys', '')
            keys_options = ' '.join(
                list(
                    map(lambda i: '--key %s' % i if i else '',
                        luks_keys.split(';'))))

            if 'invalid_pwd_file' not in checkpoint:
                is_file_key = r'--key \S+:file:(\S+)'
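                # e.g. '--key /dev/sda2:file:/tmp/luks_key' captures '/tmp/luks_key'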
                for file_key in re.findall(is_file_key, keys_options):
                    with open(file_key, 'w') as fd:
                        fd.write(luks_password)
            v2v_params['v2v_opts'] += ' ' + keys_options

        if 'empty_cdrom' in checkpoint:
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if 'exist_uuid' in checkpoint:
                auto_clean = False
            if checkpoint[0] in [
                    'mismatched_uuid', 'no_uuid', 'invalid_source',
                    'system_rhv_pem'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only,
                                           interaction_run)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if 'system_rhv_pem' in checkpoint:
            if 'set' in checkpoint:
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
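            # Drop '-oo rhv-cafile=...' so virt-v2v falls back to the system CA store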
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'mismatched_uuid' in checkpoint:
            # Append an extra rhv-disk-uuid option
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if 'no_uuid' in checkpoint:
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'exist_uuid' in checkpoint:
            # Create vmchecker here to clean up the VM because it will not be created in check_result
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            # Update name to avoid conflict
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            new_cmd += ' --no-copy'
            logging.debug('re-run v2v command:\n%s', new_cmd)
        if 'invalid_source' in checkpoint:
            if params.get('invalid_vpx_hostname'):
                new_cmd = v2v_result.replace(
                    vpx_hostname, params.get('invalid_vpx_hostname'))
            if params.get('invalid_esx_hostname'):
                new_cmd = v2v_result.replace(
                    esxi_host, params.get('invalid_esx_hostname'))

        if checkpoint[0] in [
                'mismatched_uuid', 'no_uuid', 'invalid_source', 'exist_uuid',
                'system_rhv_pem'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if enable_legacy_cp:
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
        if checkpoint[0].startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
        if 'virtio_win_iso_mount' in checkpoint:
            process.run('umount /opt', ignore_status=True)
        if 'ogac' in checkpoint and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if 'block_dev' in checkpoint and hasattr(os_directory, 'name'):
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os_directory.cleanup()
        if 'virtio_iso_blk' in checkpoint:
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if 'system_rhv_pem' in checkpoint and 'set' in checkpoint:
            global_pem_cleanup()
        if 'without_default_net' in checkpoint:
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if 'with_proxy' in checkpoint:
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        if unprivileged_user:
            process.system("userdel -fr %s" % unprivileged_user)
        if params.get('os_directory') and os.path.isdir(
                params['os_directory']):
            shutil.rmtree(params['os_directory'], ignore_errors=True)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #2
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    version_requried = params.get("version_requried")
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Force a separating space before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if device exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of grub files meets expectation.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)
        except ShellProcessTerminatedError:
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)

        if not res:
            test.fail('Not found running qemu-ga service')

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('qemu-guest-agent packages to be installed: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # If qemu-guest-agent version in VM is higher than the pkg in qemu-guest-agent-iso,
        # v2v will not update the qemu-guest-agent version and report a warning.
        #
        # e.g.
        # virt-v2v: warning: failed to install QEMU Guest Agent: command:         package
        # qemu-guest-agent-10:2.12.0-3.el7.x86_64 (which is newer than
        # qemu-guest-agent-10:2.12.0-2.el7.x86_64) is already installed
        if not any([vm_pkg_ver in pkg for pkg in all_pkgs]):
            logging.debug(
                'Wrong qemu-guest-agent version; it may be higher than the package version in the ISO'
            )
            logging.info(
                'Unexpected qemu-guest-agent version, set v2v log checking')
            expect_msg_ptn = r'virt-v2v: warning: failed to install QEMU Guest Agent.*?is newer than.*? is already installed'
            params.update({'msg_content': expect_msg_ptn, 'expect_msg': 'yes'})

        # Check the service status of qemu-guest-agent in VM
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if ubuntu-server exists')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def cmd_remove_option(cmd, opt_pattern):
        """
        Remove an option from cmd

        :param cmd: the command string
        :param opt_pattern: a regex pattern matching the option to remove
        """
        for item in re.findall(opt_pattern, cmd):
            cmd = cmd.replace(item, '').strip()
        return cmd
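        # Illustrative usage (pattern taken from the rhv checks further below):
        #   cmd_remove_option(cmd, r'-oo rhv-cafile=\S+\s*')
        # strips any '-oo rhv-cafile=...' option from the command string.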

    def find_net(bridge_name):
        """
        Find which network uses the specified bridge

        :param bridge_name: name of the bridge to look up
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name
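        # Note: this relies on 'virsh net-info <net>' printing a line like
        # 'Bridge:         virbr0' (value illustrative), which is matched by
        # the r'Bridge:\s+(\S+)' pattern above.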

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if checkpoint == 'ogac':
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the process cannot be controlled. To avoid
                # breaking vmchecker.run(), call check_windows_ogac before
                # vmchecker.run(), because check_windows_ogac waits until
                # the reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    check_windows_ogac(vmchecker.checker)
                else:
                    check_linux_ogac(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
                virsh_session.close()
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'file_architecture':
                check_file_architecture(vmchecker.checker)
            if checkpoint == 'ubuntu_tools':
                check_ubuntools(vmchecker.checker)
            if checkpoint == 'without_default_net':
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.compare_version(
                version_requried):
            test.cancel("Testing requries version: %s" % version_requried)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename the guest if its name contains a special character when
        # converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'ogac':
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or rhv-guest-tools-iso,'
                        ' try to prepare it manually. This is not a permanent step; once'
                        ' the official build includes it, this step should be removed.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if checkpoint == 'virtio_iso_blk':
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev
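            # A minimal sketch of the effect (device name illustrative):
            #   losetup --find               -> /dev/loop0
            #   losetup /dev/loop0 <virtio_win_path>
            # VIRTIO_WIN then points at a loop block device instead of an ISO
            # file.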

        if checkpoint == 'invalid_pem':
            # simply change the 2nd line to lowercase to get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()
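            # Illustrative effect (hypothetical PEM content): the base64 line
            # right after '-----BEGIN CERTIFICATE-----' is rewritten in lower
            # case, which corrupts the certificate data and makes the CA file
            # invalid.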

        if checkpoint == 'without_default_net':
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if checkpoint == 'exist_uuid':
                auto_clean = False
            if checkpoint in [
                    'mismatched_uuid', 'no_uuid', 'system_rhv_pem_set',
                    'system_rhv_pem_unset'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if checkpoint.startswith('system_rhv_pem'):
            if checkpoint == 'system_rhv_pem_set':
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'mismatched_uuid':
            # Append an extra rhv-disk-uuid option to create a mismatch
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if checkpoint == 'no_uuid':
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'exist_uuid':
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            logging.debug('re-run v2v command:\n%s', new_cmd)

        if checkpoint in [
                'mismatched_uuid', 'no_uuid', 'exist_uuid',
                'system_rhv_pem_set', 'system_rhv_pem_unset'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if checkpoint == 'ogac' and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'virtio_iso_blk':
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'system_rhv_pem_set':
            global_pem_cleanup()
        if checkpoint == 'without_default_net':
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)

def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.error('Missing command: virt-v2v')
    # The guest name might be changed, so we need a new variable to save the
    # original name
    vm_name = params['original_vm_name'] = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    input_mode = params.get('input_mode')
    input_file = params.get('input_file')
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    os_pool = output_storage = params.get('output_storage', 'default')
    bridge = params.get('bridge')
    network = params.get('network')
    address_cache = env.get('address_cache')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_virsh_pre_conn = params.get('skip_virsh_pre_conn', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    checkpoint = params.get('checkpoint', '')
    datastore = params.get('datastore')
    esxi_host = params.get('esx_hostname')
    esxi_password = params.get('esxi_password')
    hypervisor = params.get("hypervisor")
    input_transport = params.get("input_transport")
    vmx_nfs_src = params.get("vmx_nfs_src")
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    vpx_dc = params.get("vpx_dc")
    vpx_hostname = params.get("vpx_hostname")
    vpx_password = params.get("vpx_password")
    src_uri_type = params.get('src_uri_type')
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    error_list = []

    # Create a different sasl_user name for each job
    if output_mode == 'rhev':
        params.update({
            'sasl_user':
            params.get("sasl_user") + utils_misc.generate_random_string(3)
        })
        logging.info('sasl user name is %s' % params.get("sasl_user"))
        if output_method == 'rhv_upload':
            # Create password file for '-o rhv_upload' to connect to ovirt
            with open(rhv_passwd_file, 'w') as f:
                f.write(rhv_passwd)
            # Copy ca file from ovirt to local
            remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                   ovirt_engine_passwd, ovirt_ca_file_path,
                                   local_ca_file_path)

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_BSOD():
        """
        Check if the guest boots up into a BSOD
        """
        bar = 0.999
        match_img = params.get('image_to_match')
        screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir()
        if match_img is None:
            test.error('No BSOD screenshot to match!')
        cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"'
        if process.run(cmd_man_page, shell=True).exit_status != 0:
            log_fail('Man page does not contain the boot failure msg')
        for i in range(100):
            virsh.screenshot(vm_name, screenshot)
            similar = ppm_utils.image_histogram_compare(screenshot, match_img)
            if similar > bar:
                logging.info('Meet BSOD with similarity %s' % similar)
                return
            time.sleep(1)
        log_fail('No BSOD as expected')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check():
            """
            Checking the VM
            """
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # Create vmchecker before virsh.start so that the vm can be
            # undefined if it fails to start.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                if checkpoint != 'win2008r2_ostk':
                    ret = vmchecker.run()
                    if len(ret) == 0:
                        logging.info("All common checkpoints passed")
                if checkpoint == 'win2008r2_ostk':
                    check_BSOD()
                # Merge 2 error lists
                error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            vm_check()
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'main_vm': vm_name,
            'target': target,
            'v2v_opts': v2v_opts,
            'os_storage': output_storage,
            'network': network,
            'bridge': bridge,
            'input_mode': input_mode,
            'input_file': input_file,
            'new_name': 'ova_vm_' + utils_misc.generate_random_string(3),
            'datastore': datastore,
            'esxi_host': esxi_host,
            'esxi_password': esxi_password,
            'input_transport': input_transport,
            'vmx_nfs_src': vmx_nfs_src,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'os_pool': os_pool,
            'rhv_upload_opts': rhv_upload_opts,
            'params': params
        }
        if input_mode == 'vmx':
            v2v_params.update({
                'new_name':
                vm_name + utils_misc.generate_random_string(3),
                'hypervisor':
                hypervisor,
                'vpx_dc':
                vpx_dc,
                'password':
                vpx_password if src_uri_type != 'esx' else esxi_password,
                'hostname':
                vpx_hostname,
                'skip_virsh_pre_conn':
                skip_virsh_pre_conn
            })
        # copy ova from nfs storage before v2v conversion
        if input_mode == 'ova':
            src_dir = params.get('ova_dir')
            dest_dir = params.get('ova_copy_dir')
            logging.info('Copy ova from %s to %s', src_dir, dest_dir)
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            else:
                logging.debug('%s already exists, skip copying' % dest_dir)
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()

        if checkpoint == 'ova_relative_path':
            logging.debug('Current dir: %s', os.getcwd())
            ova_dir = params.get('ova_dir')
            logging.info('Change to dir: %s', ova_dir)
            os.chdir(ova_dir)

        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        if checkpoint == 'permission':
            os.environ['LIBGUESTFS_BACKEND'] = ''
        process.run('echo $LIBGUESTFS_BACKEND', shell=True)

        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        if input_mode == 'ova' and os.path.exists(dest_dir):
            shutil.rmtree(dest_dir)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if input_mode == 'vmx' and input_transport == 'ssh':
            process.run("ssh-agent -k")
Example #4
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_vm_check = params.get('skip_vm_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = [
        'floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk',
        'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt',
        'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
        'listen_none', 'listen_socket', 'only_net', 'only_br'
    ]
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """
        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Get the OS type and expected disk count
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs in the multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}',
                                 v2v_output, re.DOTALL)
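        # Each installed kernel is expected to produce a block in the v2v
        # debug output starting with '* kernel ...' and ending at a
        # '/boot/config-<version>' path (exact format illustrative); the
        # DOTALL pattern above captures one block per kernel.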
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)',
                                     str_i)
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should all
            # be the same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions do not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if the converted vm uses the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines(
        )
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'At least 2 normal kernels and 1 debug kernel are needed in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
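        """
        Attach a removable device (cdrom, floppy or disk) to the VM

        :param type: device type: 'cdrom', 'floppy' or 'disk'
        :param source: path to the image file to attach
        :param dev: target device name, e.g. 'fda' or 'hdc'
        """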
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': type,
            'targetbus': bus[type]
        }
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
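        # Resulting target names (derived from dev_prefix): ide -> hda, hdb...,
        # sata -> sda, sdb..., virtio -> vda, vdb...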
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node

        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not converted to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add a floppy entry to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('%s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the free space in the cache dir is less than $left_space MB
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                     (large_file, avail - left_space + 2)
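        # Worked example (numbers illustrative): with avail=900M and
        # left_space=800, dd writes 900 - 800 + 2 = 102M, leaving roughly 798M
        # free, i.e. just under the requested limit.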
        session.cmd(cmd_create, timeout=v2v_timeout)
        newAvail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New Available space: %sM' % newAvail)
        return large_file

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* files exist, remove them, then touch __db.001 to corrupt the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Failed to corrupt rpmdb')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux state of the guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            LOG.info('Set selinux state with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2mins to sync clock,
            # We set timeout to 300s will not have side-effects for other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
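        # 'chronyc tracking' output includes a line such as
        # 'Last offset     : +0.000268 seconds' (value illustrative); the
        # regex below extracts that signed offset for the drift check.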
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv will remove all removable devices (floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v:%s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Selinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save the original graphic type for result checking if the source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        backup_xml = None
        # Only the kvm guest's xml needs to be backed up currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
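            # q35 machine types from RHEL 8 onwards have no isa-fdc floppy
            # controller, so the floppy checkpoints cannot run there and the
            # test is cancelled up front.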
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.',
                              params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
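                # LIBGUESTFS_CACHEDIR relocates the libguestfs appliance and
                # temporary files, so conversion scratch space comes from /home
                # instead of the default (deliberately filled) location.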
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interfaces')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        for attr_i in ['ram', 'vgamem']:
                            if attr_i in video.attrib:
                                video.attrib.pop(attr_i)
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Set service chronyd on')
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server
            ]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server, 'chronyc waitsync'
            ]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
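            # 'truncate -s 1G' creates a sparse file, so the blank disk does
            # not actually consume 1G of host storage.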
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(vm_name,
                                   'network default --current',
                                   debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attach bridge')
            virsh.attach_interface(vm_name,
                                   'bridge virbr0 --current',
                                   debug=True)
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not with file image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_cp:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For constructing the rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Did not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
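        # cmd_sha1 comes from the test configuration (presumably a Windows
        # checksum utility invocation); appending ' MD5' is assumed to switch
        # the same command to the MD5 algorithm.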
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio drivers do not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('Set up ssh-agent access')
        ssh_key.setup_ssh_key(xen_host,
                              user=xen_host_user,
                              port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()
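        # Xen conversions rely on passwordless SSH via ssh-agent; the
        # 'no_passwordless_SSH' checkpoint below unsets SSH_AUTH_SOCK to
        # exercise the failure path.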

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s', params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
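                # With input_mode 'disk' virt-v2v reads the copied image
                # directly, so the source is treated as plain KVM instead of
                # going through the Xen libvirt driver.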
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
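            # The dumped XML still references the disk path on the Xen host;
            # if a local copy exists, rewrite the path below so that the
            # 'libvirtxml' input mode can open the image.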
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
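            # virt-v2v locates Windows virtio drivers through the VIRTIO_WIN
            # environment variable (the default here), which may point to
            # either an ISO or an unpacked directory; the sub-checkpoints below
            # exercise both forms as well as the unset case.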
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('Package virtio-win was not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # For constructing the rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # Create a different sasl_user name for each job
    params.update({
        'sasl_user':
        params.get("sasl_user") + utils_misc.generate_random_string(3)
    })
    logging.info('sasl user name is %s', params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Failed to set up ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd,
                               ovirt_ca_file_path, local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' does not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'storage': storage,
        'hostname': source_ip,
        'new_name': vm_name + utils_misc.generate_random_string(3),
        'output_method': output_method,
        'storage_name': storage_name
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", v2v_ret)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(
                params, address_cache, timeout=v2v_timeout):
            test.error("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()

        # Other checks
        err_list = []
        os_list = [
            'win8', 'win8.1', 'win10', 'win2012', 'win2012r2', 'win2008'
        ]
        win_version = ['6.2', '6.3', '10.0', '6.2', '6.3', '6.0']
        os_map = dict(list(zip(os_list, win_version)))
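        # os_map links the short OS names above to their Windows NT kernel
        # versions (e.g. win8 -> 6.2); the version string is interpolated into
        # the expected QXL warning pattern below.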
        vm_arch = params.get('vm_arch')
        os_ver = params.get('os_version')

        if os_ver in os_list:
            vga_log = 'The guest will be configured to use a basic VGA ' \
                      'display driver'
            if re.search(vga_log, v2v_ret.stdout):
                logging.debug('Found vga log')
            else:
                err_list.append('Did not find vga log')
            if os_ver != 'win2008':
                qxl_warn = 'virt-v2v: warning: there is no QXL driver for ' \
                           r'this version of Windows \(%s[.\s]*?%s\)' % \
                           (os_map[os_ver], vm_arch)
                if re.search(qxl_warn, v2v_ret.stdout):
                    logging.debug('Found QXL warning')
                else:
                    err_list.append('Did not find QXL warning')

        ret.extend(err_list)

        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    # For constructing the rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if device exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remaped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check the content of grub files meet expectation.
        """
        # Only for grub2
        chkfiles = [
            '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
        ]

        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is on LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                          'rhv_tools_setup_iso',
                                          fstype='iso9660')
        params['tmp_mount_point'] = mount_point
        qemu_guest_agent_dir = os.path.join(mount_point, 'linux')
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('All packages in qemu-guest-agent-iso: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s', vm_pkg_ver)

        # If qemu-guest-agent version in VM is higher than the pkg in qemu-guest-agent-iso,
        # v2v will not update the qemu-guest-agent version and report a warning.
        #
        # e.g.
        # virt-v2v: warning: failed to install QEMU Guest Agent: command:         package
        # qemu-guest-agent-10:2.12.0-3.el7.x86_64 (which is newer than
        # qemu-guest-agent-10:2.12.0-2.el7.x86_64) is already installed
        if not any([vm_pkg_ver in pkg for pkg in all_pkgs]):
            logging.debug(
                'Wrong qemu-guest-agent version, maybe it is higher than package version in ISO'
            )
            logging.info(
                'Unexpected qemu-guest-agent version, set v2v log checking')
            expect_msg_ptn = r'virt-v2v: warning: failed to install QEMU Guest Agent.*?is newer than.*? is already installed'
            params.update({'msg_content': expect_msg_ptn, 'expect_msg': 'yes'})

        # Check the service status of qemu-guest-agent in VM
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if checkpoint == 'empty_cdrom':
            if status_error:
                log_fail('Virsh dumpxml failed for empty cdrom image')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if checkpoint != 'GPO_AV':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'file_architecture':
                check_file_architecture(vmchecker.checker)
            if checkpoint == 'ogac':
                check_ogac(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file
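        # virt-v2v's '-ip FILE' option reads the source hypervisor password
        # from FILE, which avoids an interactive prompt and keeps the password
        # off the command line.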

        if params.get('output_format'):
            v2v_params.update({'output_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s', params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'ogac':
            rhv_iso_path = '/usr/share/rhv-guest-tools-iso/rhv-tools-setup.iso'
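            # Pointing VIRTIO_WIN at the rhv-guest-tools ISO lets virt-v2v pick
            # up the bundled qemu-guest-agent packages during conversion, which
            # is what check_ogac() verifies afterwards.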
            os.environ['VIRTIO_WIN'] = rhv_iso_path
            if not os.path.isfile(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
        else:
            v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)

    finally:
        if checkpoint == 'ogac' and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #8
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    else:
        raise exceptions.TestSkipError("Unspported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    virsh_dargs = {
        'uri': remote_uri,
        'remote_ip': source_ip,
        'remote_user': source_user,
        'remote_pwd': source_pwd,
        'debug': True
    }
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {
        'net_name': network,
        'address': params.get('network_addr'),
        'dhcp_start': params.get('network_dhcp_start'),
        'dhcp_end': params.get('network_dhcp_end')
    }
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
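    # Parse the bridge name from 'virsh net-info' output so the converted guest can attach to it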
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'storage': pool_name,
        'hostname': source_ip
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some cpu models,
        # so change the cpu mode to 'host-model'
        if params.get('os_version') in ['win10', 'win2016']:
            logging.info('Set cpu mode to "host-model" for win10 and win2016')
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    source_pwd = None

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    virsh_dargs = {
        'uri': remote_uri,
        'remote_ip': source_ip,
        'remote_user': source_user,
        'remote_pwd': source_pwd,
        'auto_close': True,
        'debug': True
    }
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {
        'net_name': network,
        'address': params.get('network_addr'),
        'dhcp_start': params.get('network_dhcp_start'),
        'dhcp_end': params.get('network_dhcp_end')
    }
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'os_pool': pool_name,
        'hostname': source_ip,
        'password': source_pwd,
        'input_transport': input_transport,
        'vcenter_host': vpx_ip,
        'vcenter_password': vpx_pwd,
        'vddk_thumbprint': vddk_thumbprint,
        'vddk_libdir': vddk_libdir,
        'vddk_libdir_src': vddk_libdir_src,
        'params': params
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        LOG.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some cpu models,
        # so change the cpu mode to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            LOG.info('Set cpu mode to "host-model" for %s.', unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        # Cannot do "params['vmchecker'] = vmchecker" if vm was to_libvirt,
        # or exception 'Fatal Python error: Cannot recover from stack overflow'
        # will happen. Not sure how it happens now, if somebody knows about it,
        # Please help to fix it.
        #
        # The exception happens at:
        # avocado/utils/stacktrace.py, line 98 in analyze_unpickable_item
        # <FIXIT> in future.
        try:
            ret = vmchecker.run()
        finally:
            vmchecker.cleanup()

        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        utils_v2v.cleanup_constant_files(params)
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
Example #10
0
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if device exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of grub files meets expectations.
        """
        # Only for grub2
        chkfiles = [
            '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
        ]

        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is lvm'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if checkpoint == 'empty_cdrom':
            if status_error:
                log_fail('Virsh dumpxml failed for empty cdrom image')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if checkpoint != 'GPO_AV':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
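        # '-ip' points virt-v2v at the password file for the source connection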
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'output_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'copy_to_local':
            esx_password = params.get('esx_password')
            esx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "esx_passwd")
            logging.info('Prepare esx password file')
            with open(esx_passwd_file, 'w') as pwd_f:
                pwd_f.write(esx_password)
            esx_uri = 'esx://root@%s/?no_verify=1' % esx_ip
            copy_cmd = 'virt-v2v-copy-to-local -ic %s %s --password-file %s' %\
                       (esx_uri, vm_name, esx_passwd_file)
            process.run(copy_cmd)
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params['input_file'] = '%s.xml' % vm_name
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
        else:
            v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)

    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy and https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # create different sasl_user name for different job
    params.update({'sasl_user': params.get("sasl_user") +
                   utils_misc.generate_random_string(3)})
    logging.info('sasl user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Failed to set up ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path,
                               local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': storage, 'hostname': source_ip,
                  'new_name': vm_name + utils_misc.generate_random_string(3),
                  'output_method': output_method, 'storage_name': storage_name,
                  'input_transport': input_transport, 'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            test.error("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()

        # Other checks
        err_list = []
        os_list = [
            'win8',
            'win8.1',
            'win10',
            'win2012',
            'win2012r2',
            'win2008',
            'win2016',
            'win2019']
        win_version = ['6.2', '6.3', '10.0', '6.2', '6.3', '6.0', '10.0', '10.0']
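        # Positionally pair each OS name with its Windows NT version (e.g. win10 -> 10.0)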
        os_map = dict(list(zip(os_list, win_version)))
        vm_arch = params.get('vm_arch')
        os_ver = params.get('os_version')

        if os_ver in os_list:
            vga_log = 'The guest will be configured to use a basic VGA ' \
                      'display driver'
            if re.search(vga_log, v2v_ret.stdout):
                logging.debug('Found vga log')
            else:
                err_list.append('Did not find vga log')
            if os_ver != 'win2008':
                qxl_warn = 'virt-v2v: warning: there is no QXL driver for ' \
                          r'this version of Windows \(%s[.\s]*?%s\)' %\
                           (os_map[os_ver], vm_arch)
                if re.search(qxl_warn, v2v_ret.stdout):
                    logging.debug('Found QXL warning')
                else:
                    err_list.append('Did not find QXL warning')

        ret.extend(err_list)

        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #12
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    storage = params.get('output_storage')
    storage_name = params.get('storage_name')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = params.get('v2v_opts', '-v -x')
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_check = 'yes' == params.get('skip_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom',
                   'sata_disk', 'network_rtl8139', 'network_e1000',
                   'spice', 'spice_encrypt', 'spice_qxl',
                   'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
                   'listen_none', 'listen_socket', 'only_net', 'only_br']
    error_list = []

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri,
                       'remote_ip': source_ip,
                       'remote_user': source_user,
                       'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """

        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                logging.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        logging.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            logging.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        logging.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            logging.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs in the multi-kernel case
        """
        logging.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}', v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        logging.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i)
            logging.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should be same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions does not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd('rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines()
        logging.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error('Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        logging.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
                'type': type, 'targetbus': bus[type]}
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        logging.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev', dev_table[dest] + 'd' + string.ascii_lowercase[index])
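            # Remove the stale <address> element so libvirt assigns one matching the new bus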
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        logging.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, virsh_instance):
        """
        Check if number and type of network cards meet expectation
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        iflist = vmxml.get_iface_all()
        logging.debug('MAC list before v2v: %s' % mac_list)
        logging.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not convert to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
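        # Only insert the (fd0) entry if it is not already present in device.map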
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {'root': 'e2label %s ROOT',
                   'swap': 'swaplabel -L SWAPPER %s'}
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('%s not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
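            # 'nofail' keeps the guest bootable if the cdrom device is absent after conversion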
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            logging.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM',
                'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            logging.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        logging.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the free space on root is less than $left_space MB
        """
        cmd_df = "df -m / --output=avail"
        df_output = session.cmd(cmd_df).strip()
        logging.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        logging.info('Available space: %dM' % avail)
        if avail > left_space - 1:
            tmp_dir = data_dir.get_tmp_dir()
            if session.cmd_status('ls %s' % tmp_dir) != 0:
                session.cmd('mkdir %s' % tmp_dir)
            large_file = os.path.join(tmp_dir, 'file.large')
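            # Write just enough zeros so roughly left_space MB remain free on /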
            cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                         (large_file, avail - left_space + 2)
            session.cmd(cmd_create, timeout=v2v_timeout)
        logging.info('Available space: %sM' % session.cmd(cmd_df).strip())

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* exist, remove them, then touch __db.001 to corrupt the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Failed to corrupt rpmdb')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux state of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        logging.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            logging.info('Set selinux state with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST; 3min 48s ago
        firewalld_status = session.cmd('systemctl status firewalld.service|grep Active:',
                                       ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)', firewalld_status).group()
        logging.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd('systemctl status '
                                               'firewalld.service|grep Active:',
                                               ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)', firewalld_status).group()
        logging.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            logging.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2mins to sync clock,
            # We set timeout to 300s will not have side-effects for other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            logging.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        logging.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        logging.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        logging.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        logging.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            logging.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            logging.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if skip_check:
            logging.info('Skip checking vm after conversion')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=vmchecker.virsh_instance)
            logging.debug(vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv will remove all removable devices (floppy, cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'],
                                     vmchecker.virsh_instance)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmxml.get_devices('video')[0].model_type
                    if video_type.lower() != 'qxl':
                        log_fail('Video expect QXL, actual %s' % video_type)
            if checkpoint.startswith('listen'):
                listen_type = vmxml.get_devices('graphics')[0].listen_type
                logging.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                logging.info('Selinux status after v2v:%s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'storage': storage,
            'hostname': source_ip,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3)}
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'output_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({'sasl_user': params.get("sasl_user") +
                           utils_misc.generate_random_string(3)})
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['storage'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Save the original graphics type for result checking if the source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')

        backup_xml = None
        # Currently only a kvm guest's xml needs to be backed up
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = 0
            for disk in list(new_xml.get_disk_all().values()):
                if disk.get('device') == 'disk':
                    disk_count += 1
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint == 'no_space':
            @vm_shell
            def take_space(**kwargs):
                create_large_file(kwargs['session'], 20)
            take_space()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            create_large_file(session, 1000)
            if checkpoint == 'host_no_space_setcache':
                logging.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            logging.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {'type': 'spice',
                                'passwd': params.get('spice_passwd', 'redhat')}
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {'type': 'spice',
                                      'passwdValidTo': '1970-01-01T00:00:01'}
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                logging.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    logging.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find(
                        'devices').find('video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support the 'ram' and 'vgamem' attributes
                    if video_type == 'cirrus':
                        for attr_i in ('ram', 'vgamem'):
                            video.attrib.pop(attr_i, None)
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find(
                'devices').find('graphics').find('listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            logging.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            logging.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            logging.info('Set service chronyd on')
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            logging.info('Sync time with %s', ntp_server)
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server,
                   'chronyc waitsync']
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            logging.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            logging.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            logging.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            logging.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            logging.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            logging.info('Attach network')
            virsh.attach_interface(
                vm_name, 'network default --current', debug=True)
            v2v_params.pop('bridge')
        if checkpoint == 'only_br':
            logging.info('Attach bridge')
            virsh.attach_interface(
                vm_name, 'bridge virbr0 --current', debug=True)
            v2v_params.pop('network')
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            logging.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest disk is not a file-based image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh:
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status('firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large')
            if os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
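For orientation, utils_v2v.v2v_cmd in the example above assembles a virt-v2v command line from the v2v_params dict. A rough hand-written equivalent for a vCenter source converted into a local libvirt pool might look like the sketch below; the vCenter address, datacenter, ESXi host, guest name, pool name and file paths are placeholders, and the exact option set utils_v2v generates can differ.

# Hand-rolled sketch only; in the tests utils_v2v.v2v_cmd builds and runs this.
from avocado.utils import process   # assumed to be the process module used above

cmd = (
    'virt-v2v -ic "vpx://administrator%40vsphere.local@vcenter.example.com/DC1/esx1?no_verify=1" '
    '--password-file /tmp/vpx_passwd '    # vCenter password written to a file
    '-o libvirt -os v2v_test '            # write into the libvirt pool created by pre_pool
    '-of qcow2 -b virbr0 -v -x guest-vm'  # disk format, bridge mapping, verbose/debug, guest name
)
process.run(cmd, shell=True, timeout=1200)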
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vpx_hostname = params.get('vpx_hostname')
    esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    error_list = []
    remote_host = vpx_hostname

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if a device still exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of the grub files meets expectations.
        """
        # Only for grub2
        chkfiles = ['/etc/default/grub',
                    '/boot/grub2/grub.cfg',
                    '/etc/grub2.cfg']

        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = "Maybe the VM's swap partition is LVM"
                log_fail(
                    'Content of %s is not correct or %s' %
                    (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if checkpoint == 'empty_cdrom':
            if status_error:
                log_fail('Virsh dumpxml failed for empty cdrom image')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if checkpoint not in ['GPO_AV', 'ovmf']:
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name,
            'vpx_dc': vpx_dc, 'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target')
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd = params.get("vpx_password")
        logging.debug("vpx password is %s" % vpx_passwd)
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " --password-file %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'output_format': params['output_format']})
        # Replace special characters in the guest name when converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'ovmf':
            utils_package.package_install('OVMF')
        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'copy_to_local':
            esx_password = params.get('esx_password')
            esx_passwd_file = os.path.join(
                data_dir.get_tmp_dir(), "esx_passwd")
            logging.info('Prepare esx password file')
            with open(esx_passwd_file, 'w') as pwd_f:
                pwd_f.write(esx_password)
            esx_uri = 'esx://root@%s/?no_verify=1' % esx_ip
            copy_cmd = 'virt-v2v-copy-to-local -ic %s %s --password-file %s' %\
                       (esx_uri, vm_name, esx_passwd_file)
            process.run(copy_cmd)
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params['input_file'] = '%s.xml' % vm_name
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s',
                         http_proxy, https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {'uri': remote_uri, 'remote_ip': remote_host,
                           'remote_user': '******', 'remote_pwd': vpx_passwd,
                           'debug': True}
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
        else:
            v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)

    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy and https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
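As a side note, check_device_exist in the example above relies on a plain substring match against the dumped domain XML. A slightly more robust variant, sketched here under the assumption that the same virsh.dumpxml(..., session_id=...) helper is available, parses the XML and inspects the device attribute directly.

import xml.etree.ElementTree as ET

def cdrom_present(vm_name, virsh_session_id):
    # Sketch: parse the domain XML instead of matching "device='cdrom'" as text.
    # 'virsh' is assumed to be the same module imported by the test above.
    xml_str = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
    root = ET.fromstring(xml_str)
    return any(disk.get('device') == 'cdrom'
               for disk in root.findall('./devices/disk'))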
def run(test, params, env):
    """
    Convert a remote vm to local libvirt (KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                   'remote_user': source_user, 'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' does not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare the libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': pool_name, 'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some CPU models,
        # so the CPU mode needs to be changed to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            logging.info('Set cpu mode to "host-model" for %s.', unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
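One caveat in the network preparation of the example above: re.search(r'Bridge:\s+(\S+)', net_info).group(1) raises AttributeError when virsh net-info reports no Bridge line. A defensive variant, sketched with the example's own exceptions helper, would be:

# Sketch only; mirrors the bridge lookup above but fails with a readable test error.
match = re.search(r'Bridge:\s+(\S+)', net_info)
if not match:
    raise exceptions.TestError("No bridge found for network '%s'" % network)
bridge = match.group(1)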
Beispiel #15
0
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    os_pool = storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # For constructing the rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # For fetching the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    skip_vm_check = params.get('skip_vm_check', 'no')
    os_version = params.get('os_version')
    os_type = params.get('os_type')

    # Create a different sasl_user name for each job
    params.update({
        'sasl_user':
        params.get("sasl_user") + utils_misc.generate_random_string(3)
    })
    LOG.info('sasl user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "xen":
        # See man virt-v2v-input-xen(1)
        process.run('update-crypto-policies --set LEGACY',
                    verbose=True,
                    ignore_status=True,
                    shell=True)

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd,
                               ovirt_ca_file_path, local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'os_storage': storage,
        'hostname': source_ip,
        # For virsh connection
        'password': source_pwd,
        'new_name': vm_name + utils_misc.generate_random_string(3),
        'output_method': output_method,
        'os_storage_name': storage_name,
        'input_transport': input_transport,
        'vcenter_host': vpx_ip,
        'vcenter_password': vpx_pwd,
        'vddk_thumbprint': vddk_thumbprint,
        'vddk_libdir': vddk_libdir,
        'vddk_libdir_src': vddk_libdir_src,
        'params': params
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'of_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        LOG.info("output_method is %s" % output_method)
        # Check all checkpoints after convert
        params['vmchecker'] = vmchecker = VMChecker(test, params, env)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(
                params, address_cache, timeout=v2v_timeout):
            test.error("Import VM failed")

        # When the VM is on OSP, it cannot obtain an IP address,
        # so skip the VM checking.
        if skip_vm_check == 'yes':
            LOG.debug("skip vm checking")
            return

        ret = vmchecker.run()
        ret.extend(check_qxl_warning(v2v_ret.stdout_text, os_type, os_version))

        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; in the testing environment the
            # setting is not expected to have any other value by default.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
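For reference, with output_method set to 'rhv_upload' and a VDDK input transport, the parameters above drive roughly the following virt-v2v invocation; the engine URL, storage domain, cluster, thumbprint and file paths are placeholders, and the options utils_v2v actually generates may differ.

# Rough hand-written equivalent (sketch); utils_v2v.v2v_cmd assembles the real command.
from avocado.utils import process   # assumed to be the process module used above

cmd = (
    'virt-v2v -ic "vpx://administrator%40vsphere.local@vcenter.example.com/DC1/esx1?no_verify=1" '
    '--password-file /tmp/vpx_passwd '
    '-it vddk -io vddk-libdir=/usr/lib/vmware-vix-disklib '
    '-io vddk-thumbprint=AA:BB:CC:DD '
    '-o rhv-upload -oc https://ovirt.example.com/ovirt-engine/api '
    '-op /tmp/rhv_passwd -os nfs_data -oo rhv-cafile=/tmp/ca.pem '
    '-oo rhv-cluster=Default -of qcow2 -v -x guest-vm'
)
process.run(cmd, shell=True, timeout=1800)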