Example #1
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in [
        'on', 'force_on'] else ''
    if params.get("v2v_opts"):
        # Force a blank space before appending extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_vm_check = params.get('skip_vm_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = [
        'floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk',
        'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt',
        'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
        'listen_none', 'listen_socket', 'only_net', 'only_br'
    ]
    error_list = []

    # For constructing the rhv-upload options in the v2v command
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Preparation steps for different hypervisors
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """
        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
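        # Capture each '* kernel ... /boot/config' block from the v2v debug
        # output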
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}',
                                 v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)',
                                     str_i)
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should all
            # be the same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions do not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines()
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
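        """
        Attach a removable device (cdrom, floppy or disk) via virsh attach-disk
        """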
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': type,
            'targetbus': bus[type]
        }
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node

        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network interface not converted to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
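        # Insert the fd0 entry at line 2 of device.map only if it is not
        # already present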
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('Type %s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the remaining root space is less than $left_space MB
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
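        # Fill the filesystem so that less than left_space MB remains available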
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                     (large_file, avail - left_space + 2)
        session.cmd(cmd_create, timeout=v2v_timeout)
        newAvail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New Available space: %sM' % newAvail)
        return large_file

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* files exist, remove them, then touch __db.001 to corrupt
        # the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Corrupt rpmdb failed')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
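        # 'sed -i 1i...' prepends the serial and terminal lines to grub.conf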
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux state of the guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            LOG.info('Set selinux state with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2 mins to sync the clock;
            # setting the timeout to 300s has no side effects for the other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv removes all removable devices (floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Selinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save original graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        backup_xml = None
        # Only the kvm guest's xml needs to be backed up currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.',
                              params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
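            # Consume host disk space so that less than 800MB is left in the
            # libguestfs cache dir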
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interfaces')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        [
                            video.attrib.pop(attr_i)
                            for attr_i in ['ram', 'vgamem']
                            if attr_i in video.attrib
                        ]
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
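            # checkpoint is like 'selinux_<state>'; strip the 'selinux_' prefix
            # to get the target state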
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Set service chronyd on')
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server
            ]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server, 'chronyc waitsync'
            ]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(vm_name,
                                   'network default --current',
                                   debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attach bridge')
            virsh.attach_interface(vm_name,
                                   'bridge virbr0 --current',
                                   debug=True)
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest does not use a file-backed image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #2
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Force a blank space before appending extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    virsh_instance = None

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            LOG.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        LOG.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        LOG.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        LOG.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('"DEFAULTKERNEL=kernel" not found')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        LOG.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        LOG.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        LOG.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error and checkpoint != 'vdsm':
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            LOG.info('Checking common checkpoints for v2v')
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            else:
                LOG.info('Skip checking vm after conversion: %s' % skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'os_pool': os_pool,
            'os_storage': os_storage,
            'os_storage_name': storage_name,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'rhv_upload_opts': rhv_upload_opts,
            'params': params
        }

        bk_xml = None
        utils_v2v.set_libguestfs_backend(params)

        # See man virt-v2v-input-xen(1)
        process.run('update-crypto-policies --set LEGACY',
                    verbose=True,
                    ignore_status=True,
                    shell=True)

        # Setup ssh-agent access to xen hypervisor
        LOG.info('Set up ssh-agent access')
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(xen_host,
                                                              xen_host_user,
                                                              xen_host_passwd,
                                                              auto_close=False)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'of_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # Output to RHV doesn't support 'qcow2' right now
            v2v_params['of_format'] = 'raw'
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)
        virsh_dargs = {
            'uri': uri,
            'remote_ip': xen_host,
            'remote_user': '******',
            'remote_pwd': xen_host_passwd,
            'auto_close': True,
            'debug': True
        }
        virsh_instance = utils_v2v.wait_for(virsh.VirshPersistent,
                                            **virsh_dargs)
        LOG.debug('A new virsh session %s was created', virsh_instance)
        if not utils_v2v.wait_for(virsh_instance.domain_exists, name=vm_name):
            test.error('VM %s does not exist' % vm_name)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            LOG.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['os_pool'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            LOG.debug(virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
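            # checkpoint is 'libguestfs_backend_<value>'; strip that 19-char
            # prefix to get the backend value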
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            LOG.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            LOG.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            LOG.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
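                # Point the dumped XML at the locally copied disk image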
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                LOG.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            LOG.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            LOG.info('Content of ssh_banner file:')
            LOG.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            LOG.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            LOG.debug(xml.xmltreefile)
            disks = xml.get_disk_all_by_expr('device==cdrom')
            LOG.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            LOG.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                LOG.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            LOG.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            LOG.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()
            LOG.debug('A SASL session %s was created', v2v_sasl)

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            LOG.info(blklist)
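            # Count every block device line that has a source path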
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            LOG.info('Total disks: %d', params['disk_count'])

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        LOG.info("Cleaning Environment")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        if checkpoint == 'ssh_banner':
            LOG.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        # Restore crypto-policies to DEFAULT; it is never set to any other
        # value by default in the testing environment.
        process.run('update-crypto-policies --set DEFAULT',
                    verbose=True,
                    ignore_status=True,
                    shell=True)
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
        if virsh_instance:
            LOG.debug('virsh session %s is closing', virsh_instance)
            virsh_instance.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vdsm':
            LOG.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    LOG.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            LOG.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            LOG.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
        process.run('ssh-agent -k')
Example #3
0
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    os_pool = storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # create different sasl_user name for different job
    params.update({
        'sasl_user':
        params.get("sasl_user") + utils_misc.generate_random_string(3)
    })
    logging.info('sasl user name is %s', params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "xen":
        # See man virt-v2v-input-xen(1)
        process.run('update-crypto-policies --set LEGACY',
                    verbose=True,
                    ignore_status=True,
                    shell=True)

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd,
                               ovirt_ca_file_path, local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'os_storage': storage,
        'hostname': source_ip,
        # For virsh connection
        'password': source_pwd,
        'new_name': vm_name + utils_misc.generate_random_string(3),
        'output_method': output_method,
        'os_storage_name': storage_name,
        'input_transport': input_transport,
        'vcenter_host': vpx_ip,
        'vcenter_password': vpx_pwd,
        'vddk_thumbprint': vddk_thumbprint,
        'vddk_libdir': vddk_libdir,
        'vddk_libdir_src': vddk_libdir_src,
        'params': params
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'of_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Check all checkpoints after convert
        params['vmchecker'] = vmchecker = VMChecker(test, params, env)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(
                params, address_cache, timeout=v2v_timeout):
            test.error("Import VM failed")

        ret = vmchecker.run()

        # Other checks
        err_list = []
        os_list = [
            'win8', 'win8.1', 'win10', 'win2012', 'win2012r2', 'win2008',
            'win2016', 'win2019'
        ]
        win_version = [
            '6.2', '6.3', '10.0', '6.2', '6.3', '6.0', '10.0', '10.0'
        ]
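        # Pair each OS name with its Windows NT kernel version (win8 -> 6.2,
        # win10 -> 10.0, ...); the version shows up in the QXL warning below.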
        os_map = dict(list(zip(os_list, win_version)))
        vm_arch = params.get('vm_arch')
        os_ver = params.get('os_version')

        if os_ver in os_list:
            vga_log = 'The guest will be configured to use a basic VGA ' \
                      'display driver'
            if re.search(vga_log, v2v_ret.stdout_text):
                logging.debug('Found vga log')
            else:
                err_list.append('VGA log not found')
            if os_ver != 'win2008':
                qxl_warn = 'virt-v2v: warning: there is no QXL driver for ' \
                           r'this version of Windows \(%s[.\s]*?%s\)' % \
                           (os_map[os_ver], vm_arch)
                if re.search(qxl_warn, v2v_ret.stdout_text):
                    logging.debug('Found QXL warning')
                else:
                    err_list.append('QXL warning not found')

        ret.extend(err_list)

        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; it is never set to any other
            # value by default in the testing environment.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #4
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    version_requried = params.get("version_requried")
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    input_file = params.get("input_file")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    source_user = params.get("username", "root")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')
    error_flag = 'strict'
    estimate_file = ''

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir -p %s'" % target_path
            process.system(cmd, verbose=True)
            # Sometimes pool_create_as returns success, but the pool cannot
            # be found in the user session.
            virsh.pool_create_as(pool_name,
                                 'dir',
                                 target_path,
                                 unprivileged_user=v2v_user,
                                 debug=True)

            res = virsh.pool_info(pool_name,
                                  unprivileged_user=v2v_user,
                                  debug=True)
            if res.exit_status != 0:
                return False
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        return True

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            virsh.pool_destroy(pool_name,
                               unprivileged_user=v2v_user,
                               debug=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            test.error("Fail to find tmp target file name when converting vm"
                       " disk image")
        targets = tmp_target[0].split('/')
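        # The tmp target path is assumed to look like
        # <mnt_point>/<export_domain_uuid>/images/<image_uuid>/<vol_uuid>;
        # with a two-level mount point (e.g. /tmp/mnt) the UUIDs land at
        # indices 3, 5 and 6 after splitting on '/'.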
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = to_text(
                process.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir)))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists entirely of "0"s')
        else:
            test.fail('Failed to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
        source = output.split('\n')[2:]
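        # Lines starting with a tab are continuations of the previous field;
        # fold them back so every entry becomes a single 'key: value' line.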
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks',
            'NICs'
        ]
        if hypervisor in ['kvm', 'xen']:
            checklist.extend(['display', 'CPU features'])
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        v2v_virsh = None
        close_virsh = False
        if hypervisor == 'kvm':
            v2v_virsh = virsh
        else:
            virsh_dargs = {
                'uri': ic_uri,
                'remote_ip': remote_host,
                'remote_user': source_user,
                'remote_pwd': source_pwd,
                'auto_close': True,
                'debug': True
            }
            v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
            logging.debug('a new virsh session %s was created', v2v_virsh)
            close_virsh = True

        # Check single values
        fail = []
        try:
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
        finally:
            if close_virsh:
                logging.debug('virsh session %s is closing', v2v_virsh)
                v2v_virsh.close_session()

        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'

        if hypervisor in ['kvm', 'xen']:
            check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if str(check_map[key]) not in source_info[key]:
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all_by_expr('device==disk').values())[0]

        def _get_disk_subelement_attr_value(obj, attr, subattr):
            if obj.find(attr) is not None:
                return obj.find(attr).get(subattr)

        bus = _get_disk_subelement_attr_value(disk, 'target', 'bus')
        driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type')
        path = _get_disk_subelement_attr_value(disk, 'source', 'file')

        # For esx, disk output is like "disks: json: { ... } (raw) [scsi]"
        # For xen, disk output is like "disks: json: { ... } [ide]"
        # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]"
        if hypervisor == 'kvm':
            disks_info_pattern = r"%s \(%s\) \[%s" % (path, driver_type, bus)
        elif hypervisor == 'esx':
            # replace '.vmdk' with '-flat.vmdk', this is done in v2v
            path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk')
            # In newer qemu version, '_' is replaced with '%5f'.
            path_pattern2 = path_pattern1.replace('_', '%5f')
            # nbd:unix:/tmp/v2vnbdkit.u44G6C/nbdkit1.sock:exportname=/ (raw)
            # [scsi]
            path_pattern_nbd = r'nbd:unix:/.*? \(raw\) \[%s\]' % bus
            # For esx, '(raw)' appears to be fixed; revisit if other
            # formats show up.
            disks_info_pattern = '|'.join([path_pattern_nbd] + [
                r"https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s"
                % (remote_host, i, bus)
                for i in [path_pattern1, path_pattern2]
            ])
        elif hypervisor == 'xen':
            disks_info_pattern = "file\.path.*%s.*file\.host.*%s.* \[%s" % (
                path, remote_host, bus)

        source_disks = source_info['disks'].split()
        logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern)
        if not re.search(disks_info_pattern, source_info['disks']):
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        if hypervisor in ['kvm', 'xen']:
            feature_list = xml.features.get_feature_list()
            logging.info('CPU features:%s<->%s', source_info['CPU features'],
                         feature_list)
            if sorted(source_info['CPU features'].split(',')) != sorted(
                    feature_list):
                fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v',
                               verbose=False).stdout_text.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_print_estimate(estimate_file):
        """
        Check disk size and total size in file of estimate created by v2v
        """
        import json

        content = None
        buf = ''
        with open(estimate_file) as fp:
            all_content = fp.read()
            fp.seek(0)
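            # The machine-readable file may hold more than one JSON object;
            # accumulate lines until a closing brace and parse the first
            # object that contains both 'disks' and 'total'.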
            for line in fp:
                buf += line
                if '}' not in line:
                    continue
                if 'disks' in buf and 'total' in buf:
                    content = json.loads(buf)
                    break
                buf = ''

        logging.debug('json file content:\n%s' % all_content)
        if not content or sum(content['disks']) != content['total']:
            test.fail("The disks' size doesn't same as total value")

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    # 76 is the maximum error-line length allowed by v2v
                    if v2v_start and len(line) > 76:
                        test.fail('Error log longer than 76 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
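                # The reported allocation may take a while to settle (e.g. on
                # NFS-backed storage), so retry the check for up to 10 minutes.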

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root:
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    vmchecker = VMChecker(test, params, env)
                    params['vmchecker'] = vmchecker
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" success' % command % win_img)
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Log of saving overlays not found')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                test.fail(
                                    '%s not in --machine-readable output' %
                                    line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
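                # When qemu-img supports image locking, pass -U (force share)
                # so the check succeeds even if the image is still opened by
                # another process.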
                qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support(
                )
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
            if checkpoint == 'print_estimate_tofile':
                check_print_estimate(estimate_file)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requries version: %s" % version_requried)

        if hypervisor == "xen":
            # See man virt-v2v-input-xen(1)
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            # Remote libvirt connection is not officially supported by
            # v2v and may fail. Just use localhost to simulate a remote
            # connection to test the warnings.
            if checkpoint == 'remote_libvirt_conn':
                ic_uri = 'qemu+ssh://localhost/system'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
                input_option = "-i ova %s -ic %s -of qcow2" % (input_file,
                                                               ic_uri)
            if checkpoint == 'without_ic':
                input_option = "-i ova %s -of raw" % input_file
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s" % output_mode
            if output_mode != 'null':
                output_option += " -os %s" % output_storage
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
            if checkpoint in ['with_ic', 'without_ic']:
                output_option = output_option.replace('v2v_dir', 'src_pool')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

            if output_mode == 'rhev':
                # create different sasl_user name for different job
                params.update({
                    'sasl_user':
                    params.get("sasl_user") +
                    utils_misc.generate_random_string(3)
                })
                logging.info('sasl user name is %s', params.get("sasl_user"))

                user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                               params.get("sasl_pwd"))
                v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
                v2v_sasl.server_ip = params.get("remote_ip")
                v2v_sasl.server_user = params.get('remote_user')
                v2v_sasl.server_pwd = params.get('remote_pwd')
                v2v_sasl.setup(remote=True)
                logging.debug('A SASL session %s was created', v2v_sasl)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network',
                'empty_nic_source_bridge', 'machine_readable'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
            new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            source_pwd = passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
                remote_host, user, passwd, auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (XML) or use '-of'
            # to force the output format; see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            source_pwd = vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " -ip %s" % vpx_passwd_file
            # The RHEL 8 slow-train virt-v2v doesn't support the '-ip' option
            # yet, so use '--password-file' instead.
            if not utils_v2v.v2v_supported_option("-ip <filename>"):
                output_option = output_option.replace('-ip', '--password-file',
                                                      1)

        # If no output option is specified for virt-v2v, the 'default' pool
        # will be used.
        if output_mode is None:
            # Cleanup first to avoid failure if 'default' pool exists.
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        # Create libvirt dir pool
        if output_mode == "libvirt":
            utils_misc.wait_for(create_pool, timeout=30, step=3)

        # Work around till bug fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            utils_misc.wait_for(
                lambda: create_pool(user_pool=True,
                                    pool_name='src_pool',
                                    pool_target='v2v_src_pool'),
                timeout=30,
                step=3)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'vmx_ssh':
            esx_user = params.get("esx_host_user", "root")
            esx_pwd = params.get("esx_host_passwd")
            vmx = params.get('vmx')
            esx_pubkey, esx_session = utils_v2v.v2v_setup_ssh_key(
                esx_ip, esx_user, esx_pwd, server_type='esx', auto_close=False)
            utils_misc.add_identities_into_ssh_agent()
            input_option = '-i vmx -it ssh %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'simulate_nfs':
            simulate_images = params.get("simu_images_path")
            simulate_vms = params.get("simu_vms_path")
            simulate_dom_md = params.get("simu_dom_md_path")
            os.makedirs(simulate_images)
            os.makedirs(simulate_vms)
            process.run('touch %s' % simulate_dom_md)
            process.run('chmod -R 777 /tmp/rhv/')

        if checkpoint == 'print_estimate_tofile':
            estimate_file = utils_misc.generate_tmp_file_name(
                'v2v_print_estimate')
            v2v_options += " --machine-readable=file:%s" % estimate_file

        if checkpoint == 'remote_libvirt_conn':
            # Add localhost to known_hosts
            cmd = 'ssh-keyscan -t ecdsa localhost >> ~/.ssh/known_hosts'
            process.run(cmd, shell=True)
            # Setup remote login without password
            public_key = ssh_key.get_public_key().rstrip()
            cmd = 'echo "%s" >> ~/.ssh/authorized_keys' % public_key
            process.run(cmd, shell=True)

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
        if v2v_user:
            cmd_export_env = 'export LIBGUESTFS_BACKEND=direct'
            cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd)

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')
            # Use 'replace' as the error handler to avoid a decode
            # exception for RHEL7-84978
            if "guestfish" in cmd:
                error_flag = "replace"

        # Set timeout to kill the v2v process before the conversion succeeds
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))
        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if mnt_point and os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode is None:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag and params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if new_v2v_user:
            process.system("userdel -fr %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
        if checkpoint == 'vmx_ssh':
            utils_v2v.v2v_setup_ssh_key_cleanup(esx_session, esx_pubkey, 'esx')
            process.run("ssh-agent -k")
        if checkpoint == 'simulate_nfs':
            process.run('rm -rf /tmp/rhv/')
        if os.path.exists(estimate_file):
            os.remove(estimate_file)
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; it is never set to any other
            # value by default in the testing environment.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        if checkpoint == 'remote_libvirt_conn':
            cmd = r"sed -i '/localhost/d' ~/.ssh/known_hosts"
            process.run(cmd, shell=True, ignore_status=True)
            if locals().get('public_key'):
                key = public_key.rstrip().split()[1].split('/')[0]
                cmd = r"sed -i '/%s/d' ~/.ssh/authorized_keys" % key
                process.run(cmd, shell=True, ignore_status=True)
Example #5
0
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    source_pwd = None

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    virsh_dargs = {
        'uri': remote_uri,
        'remote_ip': source_ip,
        'remote_user': source_user,
        'remote_pwd': source_pwd,
        'auto_close': True,
        'debug': True
    }
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {
        'net_name': network,
        'address': params.get('network_addr'),
        'dhcp_start': params.get('network_dhcp_start'),
        'dhcp_end': params.get('network_dhcp_end')
    }
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
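    # Look up the bridge backing the newly created network via 'virsh
    # net-info' so the converted guest can be attached to it; 'netdst'
    # appears to be the parameter the framework uses for the guest NIC
    # backend.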
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'os_pool': pool_name,
        'hostname': source_ip,
        'password': source_pwd,
        'input_transport': input_transport,
        'vcenter_host': vpx_ip,
        'vcenter_password': vpx_pwd,
        'vddk_thumbprint': vddk_thumbprint,
        'vddk_libdir': vddk_libdir,
        'vddk_libdir_src': vddk_libdir_src,
        'params': params
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
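    # LIBGUESTFS_BACKEND=direct makes libguestfs launch its own qemu process
    # instead of going through libvirtd (see guestfs(3)), which the xen
    # conversion path here appears to require.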
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        LOG.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10, Win2016 and Win2019 are not supported by some CPU models,
        # so switch the CPU mode to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            LOG.info('Set cpu mode to "host-model" for %s.', unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after conversion
        vmchecker = VMChecker(test, params, env)
        # Cannot do "params['vmchecker'] = vmchecker" when the conversion
        # target is libvirt, or the exception 'Fatal Python error: Cannot
        # recover from stack overflow' will happen. The root cause is not
        # understood yet; if you know it, please help to fix it.
        #
        # The exception happens at:
        # avocado/utils/stacktrace.py, line 98 in analyze_unpickable_item
        # <FIXIT> in future.
        try:
            ret = vmchecker.run()
        finally:
            vmchecker.cleanup()

        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        utils_v2v.cleanup_constant_files(params)
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()


def run(test, params, env):
    """
    Convert a remote VM to a remote oVirt node.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    os_pool = storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Force a separating space before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # For constructing the rhv-upload options in the v2v command
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    skip_vm_check = params.get('skip_vm_check', 'no')
    os_version = params.get('os_version')
    os_type = params.get('os_type')

    # Create a unique sasl_user name for each job
    params.update({'sasl_user': params.get("sasl_user") +
                   utils_misc.generate_random_string(3)})
    LOG.info('sasl user name is %s', params.get("sasl_user"))

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path,
                               local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'auto_close': True,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
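    # user_pwd is a string literal encoding a list of [user, password]
    # pairs, which appears to be the format utils_sasl.SASL expects.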
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'os_storage': storage, 'hostname': source_ip,
                  # For virsh connection
                  'password': source_pwd,
                  'new_name': vm_name + utils_misc.generate_random_string(3),
                  'output_method': output_method, 'os_storage_name': storage_name,
                  'input_transport': input_transport, 'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  'params': params
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'of_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        LOG.info("output_method is %s" % output_method)
        # Check all checkpoints after conversion
        params['vmchecker'] = vmchecker = VMChecker(test, params, env)
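        # Storing the checker in params lets the finally block (and any
        # framework-level cleanup) find it and clean it up later.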
        # Import the VM into the oVirt Data Center from the export domain
        # and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            test.error("Import VM failed")

        # When the VM is on OSP, it cannot obtain an IP address, so
        # skip the VM checking.
        if skip_vm_check == 'yes':
            LOG.debug("Skip VM checking")
            return

        ret = vmchecker.run()
        ret.extend(check_qxl_warning(v2v_ret.stdout_text, os_type, os_version))

        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
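

# For orientation only: a minimal, hypothetical sketch of the kind of
# virt-v2v command line that a wrapper such as utils_v2v.v2v_cmd could build
# from the v2v_params dicts assembled above. The option names follow
# virt-v2v(1), but the mapping below is an assumption for illustration, not
# the framework's actual implementation.
def sketch_v2v_cmdline(v2v_params):
    """Return an illustrative virt-v2v argument list (sketch, not authoritative)."""
    cmd = ['virt-v2v']
    # Input side: a remote libvirt connection to the source hypervisor
    # (shown here for xen; esx would use a vpx:// URI instead).
    if v2v_params.get('hostname'):
        cmd += ['-ic', 'xen+ssh://root@%s' % v2v_params['hostname']]
    # VDDK transport for ESX guests, if configured.
    if v2v_params.get('input_transport') == 'vddk':
        cmd += ['-it', 'vddk',
                '-io', 'vddk-libdir=%s' % v2v_params.get('vddk_libdir', ''),
                '-io', 'vddk-thumbprint=%s' % v2v_params.get('vddk_thumbprint', '')]
    # Output side: e.g. rhv-upload with a target storage domain and format.
    if v2v_params.get('output_method') == 'rhv_upload':
        cmd += ['-o', 'rhv-upload', '-os', v2v_params.get('os_storage_name', '')]
    if v2v_params.get('of_format'):
        cmd += ['-of', v2v_params['of_format']]
    # Network/bridge mapping for the converted guest.
    if v2v_params.get('bridge'):
        cmd += ['-b', v2v_params['bridge']]
    if v2v_params.get('network'):
        cmd += ['-n', v2v_params['network']]
    # Finally, the name of the guest to convert.
    cmd.append(v2v_params.get('main_vm', ''))
    return cmd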