Example #1
    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name1 not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name1)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name1, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")
Example #2
    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        disk_info_list = []
        output_disk_info = {}
        output_disk_info_list = []
        result = virsh.domblklist(vm_ref,
                                  options,
                                  ignore_status=True,
                                  debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s" %
                          output_disk_info)

            for (k, v) in list(iteritems(output_disk_info)):
                output_disk_info_list.append(v)

            if "--details" in options:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v)
            else:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v[2:])

            disk_info_list.sort()
            logging.debug("The disk info list from xml is: %s" %
                          disk_info_list)
            output_disk_info_list.sort()
            logging.debug("The disk info list from command output is: %s" %
                          output_disk_info_list)

            if disk_info_list != output_disk_info_list:
                test.fail("The output did not match with disk"
                          " info from xml")
Example #3
def get_vm_blks(vm_name):
    """
    Get the list of vm's block devices
    """
    blk_list = []
    data = virsh.domblklist(vm_name).stdout.splitlines()
    for rec in data:
        if rec.startswith("Target") or rec.startswith("---"):
            continue
        else:
            pos = rec.find(" ")
            if rec[:pos]:
                blk_list.append(rec[:pos])
    return blk_list
Example #4
def get_vm_blks(vm_name):
    """
    Get the list of vm's block devices
    """
    blk_list = []
    data = virsh.domblklist(vm_name).stdout.splitlines()
    for rec in data:
        if rec.startswith("Target") or rec.startswith("---"):
            continue
        else:
            pos = rec.find(" ")
            if rec[:pos]:
                blk_list.append(rec[:pos])
    return blk_list
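get_vm_blks only needs the first column of each data row; the same filtering as a standalone sketch, with illustrative output:

# Sketch: extract target device names, skipping the header and
# separator rows, as get_vm_blks does. Sample output is illustrative.
SAMPLE = """\
Target   Source
------------------------------------------------
vda      /var/lib/libvirt/images/guest.qcow2
sdb      /dev/mapper/testlun
"""

def target_devices(output):
    devs = []
    for rec in output.splitlines():
        if rec.startswith("Target") or rec.startswith("---"):
            continue
        pos = rec.find(" ")
        if rec[:pos]:
            devs.append(rec[:pos])
    return devs

assert target_devices(SAMPLE) == ['vda', 'sdb']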
Example #5
    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        disk_info_list = []
        output_disk_info = {}
        output_disk_info_list = []
        result = virsh.domblklist(vm_ref, options,
                                  ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s"
                          % output_disk_info)

            for (k, v) in list(iteritems(output_disk_info)):
                output_disk_info_list.append(v)

            if "--details" in options:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v)
            else:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v[2:])

            disk_info_list.sort()
            logging.debug("The disk info list from xml is: %s" % disk_info_list)
            output_disk_info_list.sort()
            logging.debug("The disk info list from command output is: %s"
                          % output_disk_info_list)

            if disk_info_list != output_disk_info_list:
                test.fail("The output did not match with disk"
                          " info from xml")
Example #6
        def _find_disk_by_cmd():
            """
            Check disk using virsh command

            :return: True if the disk is found, otherwise False
            """
            ret = virsh.domblklist(vm_name, **virsh_options)
            target_disks = re.findall(r"[vs]d[a-z]", ret.stdout.strip())
            logging.debug(target_disks)

            for one_disk in target_disks:
                if target_dev in one_disk:
                    logging.debug("Found the disk '{}'".format(target_dev))
                    return True
            logging.debug("Can't find the disk '{}'".format(target_dev))
            return False
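A quick standalone check of the pattern above against illustrative domblklist output (plain Python, no test framework needed):

import re

# Sketch: pull vdX/sdX target names out of domblklist-style output.
# The sample text is illustrative.
sample = """\
Target   Source
------------------------------------------------
vda      /var/lib/libvirt/images/guest.qcow2
sdb      /dev/mapper/lun0
"""
assert re.findall(r"[vs]d[a-z]", sample) == ['vda', 'sdb']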
Example #7
    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: default is False.
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)
Example #8
    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        output_disk_info = {}
        result = virsh.domblklist(vm_ref,
                                  options,
                                  ignore_status=True,
                                  debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                raise error.TestFail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s" %
                          output_disk_info)

            if "--details" in options:
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match with disk"
                                         " info from xml")
            else:
                for i in range(len(disk_info.keys())):
                    disk_info[i] = disk_info[i][2:]
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match with disk"
                                         " info from xml")
Example #9
    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        output_disk_info = {}
        result = virsh.domblklist(vm_ref, options,
                                  ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                raise error.TestFail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i-2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s"
                          % output_disk_info)

            if "--details" in options:
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match with disk"
                                         " info from xml")
            else:
                for i in range(len(disk_info.keys())):
                    disk_info[i] = disk_info[i][2:]
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match with disk"
                                         " info from xml")
Example #10
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe': r'"C:\program files\redhat\rhev\apt\rhsrvany.exe"'
        }
        fail = False
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if not status:
                logging.error('%s exists' % key)
                fail = True
        if fail:
            log_fail('RHEV file exists after convert to kvm')

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('"DEFAULTKERNEL=kernel" not found')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name, session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        sha1 = vmcheck.session.cmd_output(cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio drivers do not meet expectations')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' % (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'storage':  params.get('output_storage', 'default'),
            'network':  params.get('network'),
            'bridge':   params.get('bridge'),
            'target':   params.get('target')
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('Set up ssh-agent access')
        ssh_key.setup_ssh_key(xen_host, user=xen_host_user,
                              port=22, password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {'passwd': params.get('vnc_passwd', 'redhat')}
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                        vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                        vmxml, params[checkpoint]['passwd'],
                        virsh_instance=virsh_instance)
            logging.debug(virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
            if checkpoint == 'format_convert':
                v2v_params['output_format'] = 'qcow2'
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' % ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win', ignore_status=True).exit_status == 0:
                test.error('virtio-win package not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get('device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
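The vdsm checkpoint above backs each conf file up with a .bk suffix before vdsm-tool rewrites it, then restores it in the finally block. The same pattern as a standalone sketch (the path is illustrative):

import os
import shutil

# Sketch: the backup/restore pattern used around the vdsm checkpoint.
# CONF is an illustrative path.
CONF = '/tmp/example.conf'

with open(CONF, 'w') as f:
    f.write('key=1')
shutil.copyfile(CONF, CONF + '.bk')      # back up before modifying

with open(CONF, 'w') as f:
    f.write('key=2')                     # simulate a config change

if os.path.exists(CONF + '.bk'):         # recover during cleanup
    os.remove(CONF)
    shutil.move(CONF + '.bk', CONF)

with open(CONF) as f:
    assert f.read() == 'key=1'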
Example #11
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For constructing the rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('"DEFAULTKERNEL=kernel" not found')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio drivers do not meet expectations')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('Set up ssh-agent access')
        ssh_key.setup_ssh_key(xen_host,
                              user=xen_host_user,
                              port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
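Example #12 below picks the target for its new disk by incrementing the last letter of the highest existing virtio dev name; the arithmetic in isolation:

# Sketch: compute the dev name following 'vda' ('vda' -> 'vdb'),
# as Example #12 does for its new disk.
disk_dev = 'vda'
new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)
assert new_dev == 'vdb'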
Example #12
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('No disk file found in vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate fail
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create logical device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Wrong disk source, unsupported by this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image)
        # A raw block device is used directly; no overlay image is created
        if not (disk_type == 'block' and driver_type == 'raw'):
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            # avocado-vt's VMXMLDevices.append returns the updated list
            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if disk_type != 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        virsh_blk_cmd = getattr(virsh, blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += ' --base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            # Parenthesize the conditional: '+' binds tighter, so without the
            # parentheses the whole expression collapses to '' when blk_top != 0
            opts += opt_top + opt_base + (' --active' if blk_top == 0 else '')

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        check_bc = globals().get(check_bc_func_name)
        if callable(check_bc):
            if not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented or not callable.',
                            check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.run(cmd, shell=True,
                                      verbose=True).stdout_text.strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
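The test above delegates the chain comparison to check_backingchain(), which is defined elsewhere in the test module and not shown here. A minimal sketch of such a helper, assuming bc_info is the raw text of 'qemu-img info --backing-chain' and bc_chain lists the expected image paths from top to base (the name and regex below are assumptions, not the original implementation):

import re

def check_backingchain(bc_chain, bc_info):
    """Return True if every path in bc_chain occurs in bc_info in order."""
    # qemu-img reports each layer as 'image: <path>' and its parent as
    # 'backing file: <path> ...'; collect the paths in reported order
    reported = re.findall(r'^(?:image|backing file):\s+(\S+)', bc_info, re.M)
    pos = 0
    for path in bc_chain:
        if path not in reported[pos:]:
            return False
        # Advance past the match so ordering is enforced
        pos = reported.index(path, pos) + 1
    return True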
Example #13
def run(test, params, env):
    """
    Test for virt-clone, it is used to clone a guest
    to another with given name.

    (1). Get source guest and destination guest name.
    (2). Clone source guest to dest guest with virt-clone.
    (3). Check the dest guest.
    """
    # Get the full path of virt-clone command.
    VIRT_CLONE = os_dep.command("virt-clone")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    dest_guest_name = params.get("dest_vm", "test-clone")
    dest_guest_file = params.get("dest_image")
    dest_guest_path = None

    # Destroy and undefine the test-clone guest before
    # executing virt-clone command.
    virsh.remove_domain(dest_guest_name)

    cmd = "%s --connect=%s -o %s -n %s" % (VIRT_CLONE,
                                           vm.connect_uri,
                                           vm_name,
                                           dest_guest_name)

    domblklist_result = virsh.domblklist(vm_name)

    if len(domblklist_result.stdout.strip().splitlines()) >= 3:
        # We need a file for destination if guest has block devices.
        dest_guest_path = os.path.join(data_dir.get_data_dir(),
                                       dest_guest_file)
        if os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)

        cmd = "%s -f %s" % (cmd, dest_guest_path)

    try:
        cmd_result = utils.run(cmd, ignore_status=True)

        if cmd_result.exit_status:
            raise error.TestFail("command of virt-clone failed.\n"
                                 "output: %s." % cmd_result)

        # Collect the start result in a mutable container; a plain assignment
        # inside _start_success would only create a local variable, leaving
        # the outer start_result as None in the failure message.
        start_result = {}

        # We will get an error of "error: monitor socket did not show up:"
        # when starting the vm immediately after virt-clone.

        def _start_success():
            start_result['result'] = virsh.start(dest_guest_name)
            return start_result['result'].exit_status == 0

        if not utils_misc.wait_for(_start_success, timeout=5):
            raise error.TestFail("virt-clone exited successfully, but "
                                 "starting the cloned guest failed.\n"
                                 "Detail: %s." % start_result.get('result'))
    finally:
        # cleanup remove the dest guest.
        virsh.remove_domain(dest_guest_name)
        # remove image file if we created it.
        if dest_guest_path and os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)
Example #14
def run(test, params, env):
    """
    Test for virt-clone, it is used to clone a guest
    to another with given name.

    (1). Get source guest and destination guest name.
    (2). Clone source guest to dest guest with virt-clone.
    (3). Check the dest guest.
    """
    # Get the full path of virt-clone command.
    try:
        VIRT_CLONE = process.run("which virt-clone",
                                 shell=True).stdout_text.strip()
    except process.CmdError:
        # process.run raises CmdError, not ValueError, when 'which' fails
        test.cancel("virt-clone command not found on host.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    dest_guest_name = params.get("dest_vm", "test-clone")
    dest_guest_file = params.get("dest_image")
    dest_guest_path = None

    # Destroy and undefine the test-clone guest before
    # executing virt-clone command.
    virsh.remove_domain(dest_guest_name)

    cmd = "%s --connect=%s -o %s -n %s" % (VIRT_CLONE, vm.connect_uri, vm_name,
                                           dest_guest_name)

    domblklist_result = virsh.domblklist(vm_name)

    if len(domblklist_result.stdout.strip().splitlines()) >= 3:
        # We need a file for destination if guest has block devices.
        dest_guest_path = os.path.join(data_dir.get_data_dir(),
                                       dest_guest_file)
        if os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)

        cmd = "%s -f %s" % (cmd, dest_guest_path)

    try:
        cmd_result = process.run(cmd, ignore_status=True, shell=True)

        if cmd_result.exit_status:
            test.fail("command of virt-clone failed.\n"
                      "output: %s." % cmd_result)

        start_result = None

        # We will get an error of "error: monitor socket did not show up:"
        # when starting the vm immediately after virt-clone.

        def _start_success():
            # 'nonlocal' lets the nested function update start_result, so the
            # failure message below can show the real virsh result
            nonlocal start_result
            start_result = virsh.start(dest_guest_name)
            return start_result.exit_status == 0

        if not utils_misc.wait_for(_start_success, timeout=5):
            test.fail("virt-clone exited successfully, but starting the "
                      "cloned guest failed.\n Detail: %s." % start_result)
    finally:
        # cleanup remove the dest guest.
        virsh.remove_domain(dest_guest_name)
        # remove image file if we created it.
        if dest_guest_path and os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)
def run(test, params, env):
    """
    Test command: virsh domblklist.
    1. Prepare the test environment.
    2. Run domblklist and check the output.
    3. Attach a disk and rerun domblklist with the check.
    4. Clean up the test environment.
    """

    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        disk_info_list = []
        output_disk_info = {}
        output_disk_info_list = []
        result = virsh.domblklist(vm_ref, options,
                                  ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status == 1:
                test.fail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            # Skip the two header lines of the domblklist table
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s"
                          % output_disk_info)

            for (k, v) in list(iteritems(output_disk_info)):
                output_disk_info_list.append(v)

            if "--details" in options:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v)
            else:
                for (k, v) in list(iteritems(disk_info)):
                    # Without --details only the Target and Source columns are
                    # shown, so drop the leading type/device fields
                    disk_info_list.append(v[2:])

            disk_info_list.sort()
            logging.debug("The disk info list from xml is: %s" % disk_info_list)
            output_disk_info_list.sort()
            logging.debug("The disk info list from command output is: %s"
                          % output_disk_info_list)

            if disk_info_list != output_disk_info_list:
                test.fail("The output did not match with disk"
                          " info from xml")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get all parameters from configuration.
    vm_ref = params.get("domblklist_vm_ref")
    options = params.get("domblklist_options", "")
    info_options = params.get("info_options", "")
    status_error = params.get("status_error", "no")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")
    domblkinfo = params.get("domblkinfo", "no")
    extra = ""

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_state = vm.state()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # run domblklist and check
    domblklist_test()

    # Test domblkinfo function as well
    if domblkinfo == "yes":
        ret = virsh.domblklist(vm_ref, options,
                               ignore_status=True, debug=True)
        # '[vs]' instead of '[v,s]': the stray comma also matched literal commas
        target_disks = re.findall(r"[vs]d[a-z]", ret.stdout)
        if info_options == "":
            check_list = ["Capacity", "Allocation", "Physical"]
            ret2 = virsh.domblkinfo(vm_ref, target_disks[0])
        elif info_options == "--human":
            check_list = ["Capacity", "Allocation", "Physical", "GiB"]
            cmd = "virsh domblkinfo %s %s %s" % (vm_ref, target_disks[0], info_options)
            ret2 = process.run(cmd, shell=True, ignore_status=True)
        for check in check_list:
            if not re.search(check, results_stdout_52lts(ret2)):
                test.fail("domblkinfo output is missing expected field: %s"
                          % check)

    if status_error == "no":
        try:
            # attach disk and check
            # Create a 512 MiB sparse file to use as the attached disk
            with open(test_attach_disk, 'wb') as source_file:
                source_file.seek((512 * 1024 * 1024) - 1)
                source_file.write(str(0).encode())
            # Per bug 1049529, use --config so attach/detach applies to the
            # persistent config when the domain is shut off or queried
            # with --inactive
            if "--inactive" in options or vm_state == "shut off":
                extra = "--config"
            virsh.attach_disk(vm_name, test_attach_disk, front_dev, extra,
                              debug=True)
            domblklist_test()
        finally:
            virsh.detach_disk(vm_name, front_dev, extra, debug=True)
            if os.path.exists(test_attach_disk):
                os.remove(test_attach_disk)
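domblklist_test() above compares the command output against get_disk_info(), a helper defined elsewhere in the test module. A minimal sketch, assuming it returns {index: [type, device, target, source]} to mirror the columns of 'virsh domblklist --details' (the name and return shape are assumptions, not the original implementation):

from virttest.libvirt_xml import vm_xml

def get_disk_info(vm_name, options):
    """Collect per-disk [type, device, target, source] from the domain XML."""
    if "--inactive" in options:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    else:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disk_info = {}
    for i, disk in enumerate(vmxml.devices.by_device_tag('disk')):
        source = disk.xmltreefile.find('source')
        # A cdrom without media has no <source>; domblklist prints '-'
        src = '-' if source is None else list(source.attrib.values())[0]
        disk_info[i] = [disk.type_name, disk.device, disk.target['dev'], src]
    return disk_info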
Example #16
        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        if checkpoint in bk_list:
            virsh_instance = virsh.VirshPersistent()
            virsh_instance.set_uri(uri)
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        elif checkpoint == 'xvda_disk':
            v2v_params['input_mode'] = 'disk'
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    remote_disk_image = line.split()[-1]
                    break
            # Local path of disk image
            input_file = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            v2v_params.update({'input_file': input_file})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd, remote_disk_image,
                                   input_file)
        elif checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
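Both halves of the fragment above scan 'virsh domblklist' output line by line for the first disk row. That scan could be factored into a small helper such as the hypothetical one below (not part of the original test):

from virttest import virsh

def first_disk_source(vm_name, uri):
    """Return the source path of the first hd*/sd*/vd* disk row, or None."""
    blklist = virsh.domblklist(vm_name, uri=uri).stdout.splitlines()
    for line in blklist:
        # Disk rows start with the target device name, e.g. 'vda'
        if line.lstrip().startswith(('hd', 'sd', 'vd')):
            return line.split()[-1]
    return None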
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1). Init variables for the test.
    (2). Configure qemu.conf if needed.
    (3). Label the VM and disk with the proper label.
    (4). Start the VM and check the context.
    (5). Destroy the VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    xattr_check = 'yes' == params.get("xattr_check", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list based on the given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        stat_re = os.stat(disk_path)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
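        # Split a SELinux context into (type, range); the MLS range may itself
        # contain a colon (sensitivity:categories), hence the length check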
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
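        # Compare components pairwise up to the length of label1, so a
        # trailing MCS/MLS range present only in label2 is ignored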
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in list(disks.values()):
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=disk_path,
                                        readonly=False,
                                        forcedesc=True)
            if sec_relabel == "no" and sec_type == 'none':
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            # Need another guest to test the xattr added by libvirt
            if xattr_check:
                blklist = virsh.domblklist(vm_name, debug=True)
                target_disk = re.findall(r"[v,s]d[a-z]",
                                         blklist.stdout.strip())[0]
                guest_name = "%s_%s" % (vm_name, '1')
                cmd = "virt-clone --original %s --name %s " % (vm_name,
                                                               guest_name)
                cmd += "--auto-clone --skip-copy=%s" % target_disk
                process.run(cmd, shell=True, verbose=True)
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")
            # Start another vm with the same disk image.
            # The xattr will not be changed.
            if xattr_check:
                virsh.start(guest_name, ignore_status=True, debug=True)
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                test.fail("Label of VM is not expected after "
                          "starting.\n"
                          "Detail: vm_context=%s, sec_label=%s" %
                          (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                test.fail("Label of disk is not expected after VM "
                          "starting.\n"
                          "Detail: disk_context=%s, img_label=%s." %
                          (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # The disk context is 'system_u:object_r:svirt_image_t:s0'.
                # When the VM starts, an MLS/MCS range is appended
                # automatically, so imagelabel becomes
                # 'system_u:object_r:svirt_image_t:s0:cxx,cxxx'; the MCS
                # range must not be part of the comparison.
                if not _check_label_equal(disk_context, imagelabel):
                    test.fail("Label of disk is not relabeled by "
                              "VM\nDetal: disk_context="
                              "%s, imagelabel=%s" % (disk_context, imagelabel))
                expected_results = "trusted.libvirt.security.ref_dac=\"1\"\n"
                expected_results += "trusted.libvirt.security.ref_selinux=\"1\""
                cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                    disks.values())[0]['source']
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            filename = list(disks.values())[0]['source']
            img_label_after = utils_selinux.get_context_of_file(filename)
            stat_re = os.stat(filename)
            ownership_of_disk = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            logging.debug("The ownership of disk after guest starting is:\n")
            logging.debug(ownership_of_disk)
            logging.debug("The ownership of disk before guest starting is:\n")
            logging.debug(backup_ownership_of_disks[filename])
            if not (sec_relabel == "no" and sec_type == 'none'):
                if not libvirt_version.version_compare(5, 6, 0):
                    if img_label_after != img_label:
                        # Bug 547546 - RFE: the security drivers must remember original
                        # permissions/labels and restore them after
                        # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                        err_msg = "Label of disk is not restored in VM shutting down.\n"
                        err_msg += "Detail: img_label_after=%s, " % img_label_after
                        err_msg += "img_label_before=%s.\n" % img_label
                        err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                        err_msg += ".cgi?id=547546"
                        test.fail(err_msg)
                elif (img_label_after != img_label or ownership_of_disk !=
                      backup_ownership_of_disks[filename]):
                    err_msg = "Label of disk is not restored in VM shutting down.\n"
                    err_msg += "Detail: img_label_after=%s, %s " % (
                        img_label_after, ownership_of_disk)
                    err_msg += "img_label_before=%s, %s\n" % (
                        img_label, backup_ownership_of_disks[filename])
                    test.fail(err_msg)
            # The xattr should be cleaned after guest shutoff.
            cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                disks.values())[0]['source']
            utils_test.libvirt.check_cmd_output(cmd, content="")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in list(backup_ownership_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        # guest_name is only assigned once the clone has been attempted
        if xattr_check and 'guest_name' in locals():
            virsh.undefine(guest_name, ignore_status=True)
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
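The xattr assertions above go through utils_test.libvirt.check_cmd_output(). The underlying idea can be sketched roughly as follows, assuming getfattr is available on the host (the helper name and structure are illustrative, not the library implementation):

from avocado.utils import process

def check_security_xattr(image_path, expected_lines):
    """Return True if every expected xattr line appears in getfattr output."""
    cmd = "getfattr -m trusted.libvirt.security -d %s" % image_path
    output = process.run(cmd, shell=True, ignore_status=True).stdout_text
    # libvirt stores refcounts such as trusted.libvirt.security.ref_dac="1"
    # while the guest runs and removes them again on shutdown
    return all(line in output for line in expected_lines)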