Example #1
    def run_test(self):
        """
        Start test, create a volume.
        """
        emulated_size = "%sG" % (int(self.volume_size[:-1]) + 1)
        if int(self.volume_size[:-1]) <= 1:
            self.test.cancel("Volume size must be larger than 1G")
        self.pvtest = libvirt.PoolVolumeTest(self.test, self.params)
        self.pvtest.pre_pool(self.pool_name,
                             self.pool_type,
                             self.pool_target,
                             self.emulated_img,
                             image_size=emulated_size,
                             pre_disk_vol=[self.volume_size])
        self.pool = libvirt_storage.PoolVolume(self.pool_name)
        self.pool.create_volume(self.vol_name, self.volume_size)
def run(test, params, env):
    """
    Test storage pool and volumes with applications such as:
    installing vms, attaching volumes to vms...
    """
    pool_type = params.get("pool_type")
    pool_name = "test_%s_app" % pool_type
    pool_target = params.get("pool_target")
    emulated_img = params.get("emulated_image", "emulated-image")
    volume_count = int(params.get("volume_count", 1))
    volume_size = params.get("volume_size", "1G")
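    # Size the emulated pool image for volume_count volumes plus 1G of headroom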
    emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1)
    application = params.get("application", "install")
    disk_target = params.get("disk_target", "vdb")
    test_message = params.get("test_message", "")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    block_device = params.get("block_device", "/DEV/EXAMPLE")
    if application == "install":
        cdrom_path = os.path.join(data_dir.get_data_dir(),
                                  params.get("cdrom_cd1"))
        if not os.path.exists(cdrom_path):
            raise error.TestNAError("Can't find installation cdrom:%s"
                                    % cdrom_path)
        # Get a nonexistent domain name
        vm_name = "vol_install_test"

    try:
        pvtest = utlv.PoolVolumeTest(test, params)
        pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                        image_size=emulated_size, pre_disk_vol=[volume_size],
                        device_name=block_device)

        logging.debug("Current pools:\n%s",
                      libvirt_storage.StoragePool().list_pools())

        new_pool = libvirt_storage.PoolVolume(pool_name)
        if pool_type == "disk":
            volumes = new_pool.list_volumes()
            logging.debug("Current volumes:%s", volumes)
        else:
            volumes = create_volumes(new_pool, volume_count, volume_size)
        if application == "attach":
            vm = env.get_vm(vm_name)
            session = vm.wait_for_login()
            virsh.attach_disk(vm_name, volumes.values()[volume_count - 1],
                              disk_target)
            vm_attach_device = "/dev/%s" % disk_target
            if session.cmd_status("which parted"):
                # No parted command, check device only
                if session.cmd_status("ls %s" % vm_attach_device):
                    raise error.TestFail("Didn't find attached device:%s"
                                         % vm_attach_device)
                return
            # Test if attached disk can be used normally
            utlv.mk_part(vm_attach_device, session=session)
            session.cmd("mkfs.ext4 %s1" % vm_attach_device)
            session.cmd("mount %s1 /mnt" % vm_attach_device)
            session.cmd("echo %s > /mnt/test" % test_message)
            output = session.cmd_output("cat /mnt/test").strip()
            if output != test_message:
                raise error.TestFail("%s cannot be used normally!"
                                     % vm_attach_device)
        elif application == "install":
            # Get a nonexistent domain name anyway
            while virsh.domain_exists(vm_name):
                vm_name += "_test"
            # Prepare installation parameters
            params["main_vm"] = vm_name
            vm = env.create_vm("libvirt", None, vm_name, params,
                               test.bindir)
            env.register_vm(vm_name, vm)
            params["image_name"] = volumes.values()[volume_count - 1]
            params["image_format"] = "raw"
            params['force_create_image'] = "yes"
            params['remove_image'] = "yes"
            params['shutdown_cleanly'] = "yes"
            params['shutdown_cleanly_timeout'] = 120
            params['guest_port_unattended_install'] = 12323
            params['inactivity_watcher'] = "error"
            params['inactivity_treshold'] = 1800
            params['image_verify_bootable'] = "no"
            params['unattended_delivery_method'] = "cdrom"
            params['drive_index_unattended'] = 1
            params['drive_index_cd1'] = 2
            params['boot_once'] = "d"
            params['medium'] = "cdrom"
            params['wait_no_ack'] = "yes"
            params['image_raw_device'] = "yes"
            params['backup_image_before_testing'] = "no"
            params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                       "console=ttyS0,115200 console=tty0")
            params['cdroms'] = "unattended cd1"
            params['redirs'] += " unattended_install"
            selinux_mode = None
            try:
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
                try:
                    unattended_install.run(test, params, env)
                except process.CmdError, detail:
                    raise error.TestFail("Guest install failed:%s" % detail)
            finally:
                if selinux_mode is not None:
                    utils_selinux.set_status(selinux_mode)
                env.unregister_vm(vm_name)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
            elif application == "attach":
                virsh.detach_disk(vm_name, disk_target)
        finally:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img,
                                device_name=block_device)
Example #3
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    version_requried = params.get("version_requried")
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Force a space separator before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For constructing rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if the device exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if check == 'vmtools':
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        elif check == 'vmtools_service':
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
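        # Join the expected tokens with '\s+' so the search tolerates any
        # whitespace differences in /etc/modprobe.conf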
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
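        # All devices in device.map should be virtio (/dev/vd*), so the two
        # counts must match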
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of the grub files meets expectation.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
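            # Every resume=/dev/... entry should point at a virtio disk (/dev/vd*)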
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
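        # 'rpm -qf' exits 0 only when the file is owned by a package; a
        # 3rd-party module should not be owned by any package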
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)
        except ShellProcessTerminatedError:
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info('qemu-ga'), re.I),
                                      300,
                                      step=30)

        if not res:
            test.fail('qemu-ga service is not running')

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('qemu-guest-agent packages to install: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # If qemu-guest-agent version in VM is higher than the pkg in qemu-guest-agent-iso,
        # v2v will not update the qemu-guest-agent version and report a warning.
        #
        # e.g.
        # virt-v2v: warning: failed to install QEMU Guest Agent: command:         package
        # qemu-guest-agent-10:2.12.0-3.el7.x86_64 (which is newer than
        # qemu-guest-agent-10:2.12.0-2.el7.x86_64) is already installed
        if not any([vm_pkg_ver in pkg for pkg in all_pkgs]):
            logging.debug(
                'Wrong qemu-guest-agent version, maybe it is newer than the'
                ' package version in the ISO')
            logging.info(
                'Unexpected qemu-guest-agent version, set v2v log checking')
            expect_msg_ptn = r'virt-v2v: warning: failed to install QEMU Guest Agent.*?is newer than.*? is already installed'
            params.update({'msg_content': expect_msg_ptn, 'expect_msg': 'yes'})

        # Check the service status of qemu-guest-agent in VM
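        # Matches both systemd ('Active: active (running)') and SysV-style
        # ('qemu-ga (pid N) is running') status output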
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if the ubuntu-server package exists')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def cmd_remove_option(cmd, opt_pattern):
        """
        Remove an option from cmd

        :param cmd: the cmd
        :param opt_pattern: a pattern matching the option
        """
        for item in re.findall(opt_pattern, cmd):
            cmd = cmd.replace(item, '').strip()
        return cmd

    def find_net(bridge_name):
        """
        Find which network uses the specified bridge

        :param bridge_name: bridge name you want to find
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if checkpoint == 'ogac':
                # windows guests will reboot at any time after qemu-ga is
                # installed. The process cannot be controlled. In order not to
                # break the vmchecker.run() process, it's better to put
                # check_windows_ogac before vmchecker.run(). Because in
                # check_windows_ogac, it waits until rebooting completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    check_windows_ogac(vmchecker.checker)
                else:
                    check_linux_ogac(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
                virsh_session.close()
            if checkpoint.startswith('vmtools'):
                check_vmtools(vmchecker.checker, checkpoint)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'resume_swap':
                check_resume_swap(vmchecker.checker)
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'file_architecture':
                check_file_architecture(vmchecker.checker)
            if checkpoint == 'ubuntu_tools':
                check_ubuntools(vmchecker.checker)
            if checkpoint == 'without_default_net':
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.compare_version(
                version_requried):
            test.cancel("Testing requries version: %s" % version_requried)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'ogac':
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or rhv-guest-tools-iso;'
                        ' try to prepare it manually. This is not a permanent step; once'
                        ' the official build includes it, this step should be removed.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if checkpoint == 'virtio_iso_blk':
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if checkpoint == 'invalid_pem':
            # simply change the 2nd line to lowercase to get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
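                # Read two lines so that pos ends up at the start of the 2nd
                # line, then rewrite that line in place in lowercase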
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if checkpoint == 'without_default_net':
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if checkpoint == 'exist_uuid':
                auto_clean = False
            if checkpoint in [
                    'mismatched_uuid', 'no_uuid', 'system_rhv_pem_set',
                    'system_rhv_pem_unset'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if checkpoint.startswith('system_rhv_pem'):
            if checkpoint == 'system_rhv_pem_set':
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'mismatched_uuid':
            # append a random disk uuid to create a mismatch
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if checkpoint == 'no_uuid':
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if checkpoint == 'exist_uuid':
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            logging.debug('re-run v2v command:\n%s', new_cmd)

        if checkpoint in [
                'mismatched_uuid', 'no_uuid', 'exist_uuid',
                'system_rhv_pem_set', 'system_rhv_pem_unset'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if checkpoint == 'ogac' and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'virtio_iso_blk':
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if checkpoint == 'system_rhv_pem_set':
            global_pem_cleanup()
        if checkpoint == 'without_default_net':
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #4
def run(test, params, env):
    """
    Convert specific esx guest
    """
    V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)"

    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_cp = params.get("enable_legacy_crypto_policies",
                                  'no') == 'yes'
    version_requried = params.get("version_requried")
    unprivileged_user = params_get(params, 'unprivileged_user')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get("v2v_opts"):
        # Force a space separator before the extra options
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '').split(',')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For constructing rhv-upload options in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False
    cmd_has_ip = 'yes' == params.get('cmd_has_ip', 'yes')
    interaction_run = 'yes' == params.get('interaction_run', 'no')

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed,
        or vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if "service" not in check:
            logging.info('Check if packages have been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        else:
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
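        # Join the expected tokens with '\s+' so the search tolerates any
        # whitespace differences in /etc/modprobe.conf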
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
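        # All devices in device.map should be virtio (/dev/vd*), so the two
        # counts must match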
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check that the content of the grub files meets expectation.
        """
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
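            # Every resume=/dev/... entry should point at a virtio disk (/dev/vd*)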
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap partition is LVM'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))

        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        # rhev-apt.exe is removed on rhel9
        if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER):
            file_path.pop('rhev-apt.exe')
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
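        # 'rpm -qf' exits 0 only when the file is owned by a package; a
        # 3rd-party module should not be owned by any package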
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_signature(vmcheck, full_name):
        """
        Check signature of a file in windows VM

        :param vmcheck: VMCheck object for vm checking
        :param full_name: a file's full path name
        """
        logging.info(
            'powershell or signtool needs to be installed in guest first')

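        # Each entry is (command, regex capturing the expiry date, strptime
        # format); an empty format falls back to time.strptime's default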
        cmds = [('powershell "Get-AuthenticodeSignature %s | format-list"' %
                 full_name, r'SignerCertificate.*?Not After](.*?)\[Thumbprint',
                 '%m/%d/%Y %I:%M:%S %p'),
                ('signtool verify /v %s' % full_name,
                 r'Issued to: Red Hat.*?Expires:(.*?)SHA1 hash', '')]
        for cmd, ptn, fmt in cmds:
            _, output = vmcheck.run_cmd(cmd)
            if re.search(ptn, output, re.S):
                expire_time = re.search(ptn, output, re.S).group(1).strip()
                if fmt:
                    expire_time = time.strptime(expire_time, fmt)
                else:
                    expire_time = time.strptime(expire_time)
                if time.time() > time.mktime(expire_time):
                    test.fail("Signature of '%s' has expired" % full_name)
                return
        # Getting here means the guest has neither powershell nor signtool
        test.error("Powershell or Signtool must be installed in guest")

    def check_windows_vmware_tools(vmcheck):
        """
        Check vmware tools is uninstalled in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _get_vmware_info(cmd):
            _, res = vmcheck.run_cmd(cmd)
            if res and not re.search('vmtools', res, re.I):
                return True
            return False

        cmds = ['tasklist', 'sc query vmtools']
        for cmd in cmds:
            res = utils_misc.wait_for(lambda: _get_vmware_info(cmd),
                                      600,
                                      step=30)
            if not res:
                test.fail("Failed to verification vmtools uninstallation")

    def check_windows_service(vmcheck, service_name):
        """
        Check service in VM

        :param vmcheck: VMCheck object for vm checking
        :param service_name: a service's name
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)
        except (ShellProcessTerminatedError, ShellStatusError):
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            # VM boot-up is extremely slow when all tests are running on the
            # rhv server simultaneously, so set the timeout to 1200.
            vmcheck.create_session(timeout=1200)
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)

        if not res:
            test.fail('%s service is not running' % service_name)

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)

            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]

            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('qemu-guest-agent packages to install: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # Check the service status of qemu-guest-agent in VM
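        # Matches both systemd ('Active: active (running)') and SysV-style
        # ('qemu-ga (pid N) is running') status output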
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent;systemctl status qemu-ga*'
        _, output = vmcheck.run_cmd(cmd)

        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if the ubuntu-server package exists')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def find_net(bridge_name):
        """
        Find which network uses the specified bridge

        :param bridge_name: bridge name you want to find
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_static_ip_conf(vmcheck):
        """
        Check static IP configuration in VM

        :param vmcheck: VMCheck object for vm checking
        """
        def _static_ip_check():
            cmd = 'ipconfig /all'
            _, output = vmcheck.run_cmd(cmd, debug=False)
            v2v_cmd = params_get(params, 'v2v_command')
            # --mac 00:50:56:ac:7a:4d:ip:192.168.1.2,192.168.1.1,22,192.168.1.100,10.73.2.108,10.66.127.10
            mac_ip_pattern = '--mac (([0-9a-zA-Z]{2}:){6})ip:([0-9,.]+)'
            ip_config_list = re.search(mac_ip_pattern, v2v_cmd).group(3)
            mac_addr = re.search(mac_ip_pattern,
                                 v2v_cmd).group(1)[0:-1].upper().replace(
                                     ':', '-')
            eth_adapter_ptn = r'Ethernet adapter Ethernet.*?NetBIOS over Tcpip'

            try:
                ipconfig = [
                    v for v in re.findall(eth_adapter_ptn, output, re.S)
                    if mac_addr in v
                ][0]
            except IndexError:
                return False

            for i, value in enumerate(ip_config_list.split(',')):
                if not value:
                    continue
                # IP address
                if i == 0:
                    ip_addr = r'IPv4 Address.*?: %s' % value
                    if not re.search(ip_addr, ipconfig, re.S):
                        logging.debug('Found IP addr failed')
                        return False
                # Default gateway
                if i == 1:
                    ip_gw = r'Default Gateway.*?: .*?%s' % value
                    if not re.search(ip_gw, ipconfig, re.S):
                        logging.debug('Found Gateway failed')
                        return False
                # Subnet mask
                if i == 2:
                    # convert the CIDR prefix length to a dotted-quad subnet mask
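                    # e.g. a prefix length of 22 becomes 255.255.252.0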
                    bin_mask = '1' * int(value) + '0' * (32 - int(value))
                    cidr = '.'.join([
                        str(int(bin_mask[i * 8:i * 8 + 8], 2))
                        for i in range(4)
                    ])
                    sub_mask = r'Subnet Mask.*?: %s' % cidr
                    if not re.search(sub_mask, ipconfig, re.S):
                        logging.debug('Found subnet mask failed')
                        return False
                # DNS server list
                if i >= 3:
                    dns_server = r'DNS Servers.*?:.*?%s' % value
                    if not re.search(dns_server, ipconfig, re.S):
                        logging.debug('Found DNS Server failed')
                        return False
            return True

        try:
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 1800, step=300)
        except (ShellTimeoutError, ShellProcessTerminatedError):
            logging.debug(
                'Lost connection to windows guest; the static IP may have'
                ' taken effect')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 300, step=30)
        vmcheck.run_cmd('ipconfig /all')  # debug msg
        if not res:
            test.fail('Checking static IP configuration failed')

    def check_rhsrvany_checksums(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        def _get_expected_checksums(tool_exec, file):
            val = process.run('%s %s' % (tool_exec, file),
                              shell=True).stdout_text.split()[0]

            if not val:
                test.error('Get checksum failed')
            logging.info('%s: Expect %s: %s', file, tool_exec, val)
            return val

        def _get_real_checksums(algorithm, file):
            certutil_cmd = r'certutil -hashfile "%s"' % file
            if algorithm == 'md5':
                certutil_cmd += ' MD5'

            res = vmcheck.session.cmd_output(certutil_cmd, safe=True)
            logging.debug('%s output:\n%s', certutil_cmd, res)

            val = res.strip().splitlines()[1].strip()
            logging.info('%s: Real %s: %s', file, algorithm, val)
            return val

        logging.info('Check md5 and sha1 of rhsrvany.exe')

        algorithms = {'md5': 'md5sum', 'sha1': 'sha1sum'}

        rhsrvany_path = r'/usr/share/virt-tools/rhsrvany.exe'
        rhsrvany_path_windows = r"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"

        for key, val in algorithms.items():
            expect_val = _get_expected_checksums(val, rhsrvany_path)
            real_val = _get_real_checksums(key, rhsrvany_path_windows)
            if expect_val == real_val:
                logging.info('%s are correct', key)
            else:
                test.fail('%s of rhsrvany.exe is not correct' % key)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # Windows guests may reboot at any time after qemu-ga is
                # installed and the process cannot be controlled, so run
                # these checks before vmchecker.run(): they wait until the
                # reboot completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio driver errors did not meet expectation')

        utils_v2v.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        # VM or local output checking
        vm_check(status_error)
        # Check log size decrease option
        if 'log decrease' in checkpoint:
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        if 'fstrim_warning' in checkpoint:
            # fstrim is unrelated to v2v itself; the warning most likely
            # comes from the kernel and is harmless to the conversion.
            V2V_FSTRIM_SUCCESS_VER = "[virt-v2v-1.45.1-1.el9,)"
            if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCCESS_VER):
                params.update({'expect_msg': None})
        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requires version: %s" % version_requried)

        # See man virt-v2v-input-xen(1)
        if enable_legacy_cp:
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'cmd_has_ip': cmd_has_ip,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s', params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if 'root' in checkpoint and 'ask' in checkpoint:
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if 'root' in checkpoint and 'ask' not in checkpoint:
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if 'with_proxy' in checkpoint:
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if 'ogac' in checkpoint:
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'qemu-guest-agent not found in virtio-win or'
                        ' rhv-guest-tools-iso; preparing it manually. This is'
                        ' a temporary step and should be removed once the'
                        ' official build includes it.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if 'virtio_iso_blk' in checkpoint:
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if 'block_dev' in checkpoint:
            os_directory = params_get(params, 'os_directory')
            block_count = params_get(params, 'block_count')
            os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_',
                                                       dir=os_directory)
            diskimage = '%s/diskimage' % os_directory.name
            # Update 'os_directory' for '-os' option
            params['os_directory'] = os_directory.name

            # Create an image of block_count x 10M bytes
            cmd = 'dd if=/dev/zero of=%s bs=10M count=%s' % (diskimage,
                                                             block_count)
            process.run(cmd, shell=True)
            # Build filesystem
            cmd = 'mkfs.ext4 %s' % diskimage
            process.run(cmd, shell=True)
            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup the image as a block device
            cmd = 'losetup %s %s' % (free_loop_dev, diskimage)
            process.run(cmd, shell=True)
            # Create a soft link to the loop device
            blk_dev_link = '%s/mydisk1' % os_directory.name
            cmd = 'ln -s %s %s' % (free_loop_dev, blk_dev_link)
            process.run(cmd, shell=True)

        if 'invalid_pem' in checkpoint:
            # simply change the 2nd line to lowercase to get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
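                # after two reads, pos is the file offset of the 2nd line
                # and res holds its content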
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if 'without_default_net' in checkpoint:
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if 'bandwidth' in checkpoint:
            dynamic_speeds = params_get(params, 'dynamic_speeds')
            bandwidth_file = params_get(params, 'bandwidth_file')
            with open(bandwidth_file, 'w') as fd:
                fd.write(dynamic_speeds)

        if checkpoint[0].startswith('virtio_win'):
            cp = checkpoint[0]
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package was not removed')
            if cp.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if cp.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if cp.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if cp.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path

        if 'luks_dev_keys' in checkpoint:
            luks_password = params_get(params, 'luks_password', '')
            luks_keys = params_get(params, 'luks_keys', '')
            keys_options = ' '.join(
                list(
                    map(lambda i: '--key %s' % i if i else '',
                        luks_keys.split(';'))))
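            # e.g. luks_keys='/dev/sda2:key:pwd;/dev/sda3:file:/tmp/keyfile'
            # (assumed format) expands to
            # '--key /dev/sda2:key:pwd --key /dev/sda3:file:/tmp/keyfile'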

            if 'invalid_pwd_file' not in checkpoint:
                is_file_key = r'--key \S+:file:(\S+)'
                for file_key in re.findall(is_file_key, keys_options):
                    with open(file_key, 'w') as fd:
                        fd.write(luks_password)
            v2v_params['v2v_opts'] += ' ' + keys_options

        if 'empty_cdrom' in checkpoint:
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if 'exist_uuid' in checkpoint:
                auto_clean = False
            if checkpoint[0] in [
                    'mismatched_uuid', 'no_uuid', 'invalid_source',
                    'system_rhv_pem'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only,
                                           interaction_run)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        if 'system_rhv_pem' in checkpoint:
            if 'set' in checkpoint:
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'mismatched_uuid' in checkpoint:
            # append an extra random disk uuid to create a mismatch
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if 'no_uuid' in checkpoint:
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'exist_uuid' in checkpoint:
            # Used to clean up the VM because vm_check will not run in check_result
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            # Update name to avoid conflict
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            new_cmd += ' --no-copy'
            logging.debug('re-run v2v command:\n%s', new_cmd)
        if 'invalid_source' in checkpoint:
            if params.get('invalid_vpx_hostname'):
                new_cmd = v2v_result.replace(
                    vpx_hostname, params.get('invalid_vpx_hostname'))
            if params.get('invalid_esx_hostname'):
                new_cmd = v2v_result.replace(
                    esxi_host, params.get('invalid_esx_hostname'))

        if checkpoint[0] in [
                'mismatched_uuid', 'no_uuid', 'invalid_source', 'exist_uuid',
                'system_rhv_pem'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        if enable_legacy_cp:
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
        if checkpoint[0].startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
        if 'virtio_win_iso_mount' in checkpoint:
            process.run('umount /opt', ignore_status=True)
        if 'ogac' in checkpoint and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if 'block_dev' in checkpoint and hasattr(os_directory, 'name'):
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os_directory.cleanup()
        if 'virtio_iso_blk' in checkpoint:
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if 'system_rhv_pem' in checkpoint and 'set' in checkpoint:
            global_pem_cleanup()
        if 'without_default_net' in checkpoint:
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if 'with_proxy' in checkpoint:
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        if unprivileged_user:
            process.system("userdel -fr %s" % unprivileged_user)
        if params.get('os_directory') and os.path.isdir(
                params['os_directory']):
            shutil.rmtree(params['os_directory'], ignore_errors=True)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #5
def run(test, params, env):
    """
    Test virsh vol-create-from command to cover the following matrix:

    pool = [source, destination]
    pool_type = [dir, disk, fs, logical, netfs, iscsi, scsi]
    volume_format = [raw, qcow2, qed]

    Note: both 'iscsi' and 'scsi' type pools don't support creating volumes
    via virsh, so they can't be used as destination pools. And a disk pool
    can't create volumes with a specified format.
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_emulated_image = params.get("src_emulated_image")
    src_vol_format = params.get("src_vol_format")
    dest_pool_type = params.get("dest_pool_type")
    dest_pool_target = params.get("dest_pool_target")
    dest_emulated_image = params.get("dest_emulated_image")
    dest_vol_format = params.get("dest_vol_format")
    # default to '' so the 'in' check below can't fail on None
    prealloc_option = params.get("prealloc_option", "")
    status_error = params.get("status_error", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in prealloc_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")

    vol_file = ""
    try:
        # Create the src/dest pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        dest_pool_name = "virt-%s-pool" % dest_pool_type

        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name,
                     src_pool_type,
                     src_pool_target,
                     src_emulated_image,
                     image_size="40M",
                     pre_disk_vol=["1M"])

        if src_pool_type != dest_pool_type:
            pvt.pre_pool(dest_pool_name,
                         dest_pool_type,
                         dest_pool_target,
                         dest_emulated_image,
                         image_size="100M",
                         pre_disk_vol=["1M"])

        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())

        # Create the src vol
        # 8M is the minimal size for logical volume(PPC)
        # 4M is the minimal size for logical volume(x86)
        vol_size = params.get("image_volume_size", "16777216")
        if src_pool_type in ["dir", "logical", "netfs", "fs"]:
            src_vol_name = "src_vol"
            pvt.pre_vol(vol_name=src_vol_name,
                        vol_format=src_vol_format,
                        capacity=vol_size,
                        allocation=None,
                        pool_name=src_pool_name)
        else:
            src_vol_name = list(utlv.get_vol_list(src_pool_name).keys())[0]
        # Prepare vol xml file
        dest_vol_name = "dest_vol"
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if dest_pool_type == "disk":
            dest_vol_name = utlv.new_disk_vol_name(dest_pool_name)
            if dest_vol_name is None:
                test.error("Fail to generate volume name")
        if dest_pool_type == "disk":
            dest_vol_format = ""
            prealloc_option = ""
        vol_xml = """
<volume>
  <name>%s</name>
  <capacity unit='bytes'>%s</capacity>
  <target>
    <format type='%s'/>
  </target>
</volume>
""" % (dest_vol_name, vol_size, dest_vol_format)
        logging.debug("Prepare the volume xml: %s", vol_xml)
        vol_file = os.path.join(test.tmpdir, "dest_vol.xml")
        with open(vol_file, 'w') as xml_object:
            xml_object.write(vol_xml)

        # iSCSI and SCSI type pools can't create volumes via virsh
        if dest_pool_type in ["iscsi", "scsi"]:
            test.fail("Unsupported volume creation for %s type pool" %
                      dest_pool_type)
        # Metadata preallocation is not supported for block volumes
        if dest_pool_type in ["disk", "logical"]:
            prealloc_option = ""
        # Run virsh vol-create-from to create the dest vol
        cmd_result = virsh.vol_create_from(dest_pool_name,
                                           vol_file,
                                           src_vol_name,
                                           src_pool_name,
                                           prealloc_option,
                                           ignore_status=True,
                                           debug=True)
        status = cmd_result.exit_status

        # Check result
        if status_error == "no":
            if status == 0:
                dest_pv = libvirt_storage.PoolVolume(dest_pool_name)
                dest_volumes = list(dest_pv.list_volumes().keys())
                logging.debug("Current volumes in %s: %s", dest_pool_name,
                              dest_volumes)
                if dest_vol_name not in dest_volumes:
                    test.fail("Can't find volume: % from pool: %s" %
                              (dest_vol_name, dest_pool_name))
            else:
                test.fail(cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                test.fail("Expect fail, but run successfully!")
    finally:
        # Cleanup: both src and dest should be removed
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if src_pool_type != dest_pool_type:
            pvt.cleanup_pool(dest_pool_name, dest_pool_type, dest_pool_target,
                             dest_emulated_image)
        if os.path.isfile(vol_file):
            os.remove(vol_file)
Example #6
def run(test, params, env):
    """
    Test virsh domblkerror in 2 types error
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = int(params.get("domblkerror_timeout", 240))
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    rhel = distro.detect().name == 'rhel'
    nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server")
    nfs_service = None
    selinux_bool = None
    session = None
    selinux_bak = ""

    vm = env.get_vm(vm_name)
    if error_type == "unspecified error":
        selinux_local = params.get("setup_selinux_local", "yes") == "yes"
        if not ubuntu and not rhel:
            nfs_service_package = "nfs"
        elif rhel:
            nfs_service_package = "nfs-server"
        if not rhel and not utils_package.package_install(nfs_service_package):
            test.cancel("NFS package not available in host to test")
        # backup /etc/exports
        shutil.copyfile(export_file, "%s.bak" % export_file)
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True, verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation the guest attaches a disk backed by NFS;
            # stopping the NFS service pauses the guest with an
            # unspecified error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True,
                        verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)
            if not ubuntu and selinux_local:
                params['set_sebool_local'] = "yes"
                params['local_boolean_varible'] = "virt_use_nfs"
                params['local_boolean_value'] = "on"
                selinux_bool = utils_misc.SELinuxBoolean(params)
                selinux_bool.setup()

        elif error_type == "no space":
            # Steps to generate a "no space" block error:
            # 1. Prepare an iscsi disk and build an fs pool on it
            # 2. Create a vol with a large capacity and 0 allocation
            # 3. Attach this disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    pause the guest

            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(vm_name, img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find new disk after attach
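        # set ^ set is the symmetric difference: the single path present
        # in only one of the two listings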
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # install dependent packages
            pkg_list = ["parted", "e2fsprogs"]
            for pkg in pkg_list:
                if not utils_package.package_install(pkg, session):
                    test.error("Failed to install dependent package %s" % pkg)

            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected Fail %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # stop the nfs service to trigger the error after the large
            # image is created
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation to the mnt disk
            session = vm.wait_for_login()
            session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest does not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    test.fail("Failed to get expect result, get %s" %
                              output.stdout.strip())
            else:
                test.fail("Fail to get domblkerror info:%s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if session:
            session.close()
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
            if selinux_bool:
                selinux_bool.cleanup(keep_authorized_keys=True)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
Example #7
def run(test, params, env):
    """
    Convert specific esx guest
    """
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            test.skip("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vpx_hostname = params.get('vpx_hostname')
    esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    error_list = []
    remote_host = vpx_hostname

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if the device exists after conversion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck):
        """
        Check whether vmware tools packages have been removed
        """
        pkgs = vmcheck.session.cmd('rpm -qa').strip()
        removed_pkgs = params.get('removed_pkgs').strip().split(',')
        if not removed_pkgs:
            test.error('Missing param "removed_pkgs"')
        for pkg in removed_pkgs:
            if pkg in pkgs:
                log_fail('Package "%s" not removed' % pkg)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remapped to "/dev/vd*"')

    def check_snapshot_file(vmcheck):
        """
        Check if the removed file exists after conversion
        """
        removed_file = params.get('removed_file')
        # cmd_status returns the exit code: 0 means the file still exists
        if vmcheck.session.cmd_status('test -f %s' % removed_file) == 0:
            log_fail('Removed file "%s" exists after conversion' %
                     removed_file)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if checkpoint == 'empty_cdrom':
            if status_error:
                log_fail('Virsh dumpxml failed for empty cdrom image')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name)
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if checkpoint not in ['GPO_AV', 'ovmf']:
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
            if checkpoint == 'vmtools':
                check_vmtools(vmchecker.checker)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'snapshot':
                check_snapshot_file(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' % (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name,
            'vpx_dc': vpx_dc, 'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge':  params.get('bridge'),
            'target':  params.get('target')
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd = params.get("vpx_password")
        logging.debug(vpx_passwd)
        vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "vpx_passwd")
        with open(vpx_passwd_file, 'w') as pwd_f:
            pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " --password-file %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'output_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if checkpoint == 'ovmf':
            utils_package.package_install('OVMF')
        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '1')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'copy_to_local':
            esx_password = params.get('esx_password')
            esx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "esx_passwd")
            logging.info('Prepare esx password file')
            with open(esx_passwd_file, 'w') as pwd_f:
                pwd_f.write(esx_password)
            esx_uri = 'esx://root@%s/?no_verify=1' % esx_ip
            copy_cmd = 'virt-v2v-copy-to-local -ic %s %s --password-file %s' %\
                       (esx_uri, vm_name, esx_passwd_file)
            process.run(copy_cmd)
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params['input_file'] = '%s.xml' % vm_name
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s',
                         http_proxy, https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {'uri': remote_uri, 'remote_ip': remote_host,
                           'remote_user': '******', 'remote_pwd': vpx_passwd,
                           'debug': True}
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
        else:
            v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if 'new_name' in v2v_params:
            params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)

    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
Example #8
def run(test, params, env):
    """
    Test command: virsh pool-edit.

    Edit the XML configuration for a storage pool('dir' type as default).
    1) Edit pool by different methods.
    2) Check the edit result and cleanup env.
    """

    pool_ref = params.get("pool_ref", "name")
    pool_name = params.get("pool_name", "default")
    pool_uuid = params.get("pool_uuid", "")
    pool_exist = "yes" == params.get("pool_exist", "yes")
    status_error = "yes" == params.get("status_error", "no")
    pool_type = params.get("pool_type", "dir")
    pool_target = os.path.join(data_dir.get_tmp_dir(),
                               params.get("pool_target", "pool_target"))
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    emulated_image = params.get("emulated_image", "emulated-image-disk")
    edit_target = params.get("edit_target", "target_path")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    redefine_pool_flag = False
    pool = pool_name
    if pool_ref == "uuid":
        pool = pool_uuid
    poolxml = pool_xml.PoolXML()
    libvirt_pool = libvirt_storage.StoragePool()
    poolvolume_test = libvirt.PoolVolumeTest(test, params)
    check_pool_name = pool_name
    new_path = ""
    try:
        if pool_exist and not status_error:
            if libvirt_pool.pool_exists(pool_name):
                logging.debug("Find pool '%s' to edit.", pool_name)
                redefine_pool_flag = True
            else:
                logging.debug("Define pool '%s' as it not exist", pool_name)
                if pool_type == "gluster":
                    poolvolune_test.pre_pool(pool_name,
                                             pool_type,
                                             pool_target,
                                             emulated_image,
                                             source_name=source_name,
                                             source_path=source_path)
                else:
                    poolvolune_test.pre_pool(pool_name, pool_type, pool_target,
                                             emulated_image)
            if not pool_uuid and pool_ref == "uuid":
                pool = libvirt_pool.get_pool_uuid(pool_name)
            poolxml.xml = pool_xml.PoolXML().new_from_dumpxml(pool_name).xml
            logging.debug("Before edit pool:")
            poolxml.debug_xml()

            expect_value = ""
            # Test: Edit target path
            if edit_target == "pool_target_path":
                edit_cmd = []
                new_path = os.path.join(data_dir.get_tmp_dir(), "new_path")
                os.mkdir(new_path)
                edit_cmd.append(":%s/<path>.*</<path>" +
                                new_path.replace('/', '\/') + "<")
                pool_target = new_path
                expect_value = new_path
            # Test: Edit disk pool format type:
            elif edit_target == "pool_format_type":
                edit_cmd = []
                new_format_type = params.get("pool_format", "dos")
                edit_cmd.append(":%s/<format type=.*\/>/<format type='" +
                                new_format_type + "'\/>/")
                expect_value = new_format_type
            # Test: Redefine (delete uuid, edit pool name and target path)
            elif edit_target == "pool_redefine":
                edit_cmd = []
                new_pool_name = params.get("new_pool_name", "new_edit_pool")
                edit_cmd.append(":g/<uuid>/d")
                new_path = os.path.join(data_dir.get_tmp_dir(), "new_pool")
                os.mkdir(new_path)
                edit_cmd.append(":%s/<path>.*</<path>" +
                                new_path.replace('/', '\/') + "<")
                edit_cmd.append(":%s/<name>" + pool_name + "</<name>" +
                                new_pool_name + "<")
                pool_target = new_path
                check_pool_name = new_pool_name

            else:
                raise error.TestNAError("No edit method for %s" % edit_target)

            # run test and check the result
            logging.info("pool=%s", pool)
            edit_pool(pool, edit_cmd)
            if libvirt_pool.is_pool_active(pool_name):
                libvirt_pool.destroy_pool(pool_name)
            if not libvirt_pool.start_pool(check_pool_name):
                raise error.TestFail("Failed to start pool after editing it.")
            if not check_pool(check_pool_name, edit_target, expect_value):
                raise error.TestFail("Editing pool failed")
        elif not pool_exist and not status_error:
            raise error.TestFail(
                "Conflict condition: pool not exist and expect "
                "pool edit succeed.")
        else:
            # negative test
            output = virsh.pool_edit(pool)
            if output.exit_status:
                logging.info("Fail to do pool edit as expect: %s",
                             output.stderr.strip())
            else:
                redefine_pool_flag = True
                raise error.TestFail("Expect fail but do pool edit succeed.")
    finally:
        for pool in [pool_name, check_pool_name]:
            if libvirt_pool.pool_exists(pool):
                poolvolume_test.cleanup_pool(pool,
                                             pool_type,
                                             pool_target,
                                             emulated_image,
                                             source_name=source_name)
        if redefine_pool_flag:
            try:
                # poolxml could be empty if an error happened while defining
                # the pool
                poolxml.pool_define()
            except Exception as detail:
                logging.error("Recover pool %s failed: %s", pool_name, detail)
        if os.path.exists(new_path):
            os.rmdir(new_path)
Example #9
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    source_user = params.get("username", "root")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')
    error_flag = 'strict'

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            test.error("Fail to find tmp target file name when converting vm"
                       " disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = to_text(
                process.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir)))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists entirely of "0"s')
        else:
            test.fail('Failed to find snapshot_id')

    def setup_esx_ssh_key(hostname, user, password, port=22):
        """
        Set up remote login on the ESX server using a public key
        """
        logging.debug('Performing SSH key setup on %s:%d as %s.',
                      hostname, port, user)
        try:
            session = remote.remote_login(client='ssh',
                                          host=hostname,
                                          username=user,
                                          port=port,
                                          password=password,
                                          prompt=r'[ $#%]')
            public_key = ssh_key.get_public_key()
            session.cmd("echo '%s' >> /etc/ssh/keys-root/authorized_keys; " %
                        public_key)
            logging.debug('SSH key setup complete.')
            session.close()
        except Exception as err:
            logging.debug('SSH key setup has failed. %s', err)

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = [
            'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks',
            'NICs'
        ]
        if hypervisor in ['kvm', 'xen']:
            checklist.extend(['display', 'CPU features'])
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        v2v_virsh = None
        close_virsh = False
        if hypervisor == 'kvm':
            v2v_virsh = virsh
        else:
            virsh_dargs = {
                'uri': ic_uri,
                'remote_ip': remote_host,
                'remote_user': source_user,
                'remote_pwd': source_pwd,
                'debug': True
            }
            v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
            close_virsh = True

        # Check single values
        fail = []
        try:
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
        finally:
            if close_virsh:
                v2v_virsh.close_session()

        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'

        if hypervisor in ['kvm', 'xen']:
            check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if str(check_map[key]) not in source_info[key]:
                fail.append(key)

        # Check disk info
        disk = list(xml.get_disk_all().values())[0]

        def _get_disk_subelement_attr_value(obj, attr, subattr):
            if obj.find(attr) is not None:
                return obj.find(attr).get(subattr)

        bus = _get_disk_subelement_attr_value(disk, 'target', 'bus')
        driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type')
        path = _get_disk_subelement_attr_value(disk, 'source', 'file')

        # For esx, disk output is like "disks: json: { ... } (raw) [scsi]"
        # For xen, disk output is like "disks: json: { ... } [ide]"
        # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]"
        if hypervisor == 'kvm':
            disks_info_pattern = r"%s \(%s\) \[%s" % (path, driver_type, bus)
        elif hypervisor == 'esx':
            # Replace '.vmdk' with '-flat.vmdk'; this is done in v2v
            path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk')
            # In newer qemu versions, '_' is replaced with '%5f'.
            path_pattern2 = path_pattern1.replace('_', '%5f')
            # For esx, '(raw)' appears to be fixed; adjust if other formats show up
            disks_info_pattern = '|'.join([
                r"https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s"
                % (remote_host, i, bus)
                for i in [path_pattern1, path_pattern2]
            ])
        elif hypervisor == 'xen':
            disks_info_pattern = r"file\.path.*%s.*file\.host.*%s.* \[%s" % (
                path, remote_host, bus)

        logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern)
        if not re.search(disks_info_pattern, source_info['disks']):
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        nic_type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(nic_type)
        nic_info = '%s "%s" mac: %s' % (nic_type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features
        if hypervisor in ['kvm', 'xen']:
            feature_list = xml.features.get_feature_list()
            logging.info('CPU features:%s<->%s', source_info['CPU features'],
                         feature_list)
            if sorted(source_info['CPU features'].split(',')) != sorted(
                    feature_list):
                fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v',
                               verbose=False).stdout_text.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        test.fail('Error log longer than 72 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if ('-of' in cmd and '--no-copy' not in cmd
                    and '--print-source' not in cmd and checkpoint != 'quiet'):
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Too many output lines in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" succeeded unexpectedly'
                              % (command % win_img))
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Did not find log of saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s', overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s', overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                test.fail(
                                    '%s not in --machine-readable output' %
                                    line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                qemu_img_locking_feature_support = \
                    libvirt_storage.check_qemu_image_lock_support()
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        if checkpoint.startswith('empty_nic_source'):
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'without_ic':
                input_option = '-i %s %s' % (input_mode, vm_name)
            # Build network & bridge options to avoid network errors
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

        # Output more messages except in quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in [
                'length_of_error', 'empty_nic_source_network',
                'empty_nic_source_bridge', 'machine_readable'
        ]:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from source and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            source_pwd = passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host,
                                  user=user,
                                  port=22,
                                  password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s does not exist', vm_name)
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format; see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            source_pwd = vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            with open(vpx_passwd_file, 'w') as pwd_f:
                pwd_f.write(vpx_passwd)
            output_option += " --password-file %s" % vpx_passwd_file

        # If no output option is specified for virt-v2v, the 'default'
        # pool will be used.
        if output_mode is None:
            # Cleanup first to avoid failure if 'default' pool exists.
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Workaround until the bug is fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            create_pool(user_pool=True,
                        pool_name='src_pool',
                        pool_target='v2v_src_pool')
            create_pool(user_pool=True)
            logging.debug(virsh.pool_list(uri='qemu:///session'))
            sh_install_vm = params.get('sh_install_vm')
            if not sh_install_vm:
                test.error('Source vm installing script missing')
            with open(sh_install_vm) as fh:
                cmd_install_vm = fh.read().strip()
            process.run('su - %s -c "%s"' % (v2v_user, cmd_install_vm),
                        timeout=10,
                        shell=True)
            params['cmd_clean_vm'] = "%s 'virsh undefine %s'" % (su_cmd,
                                                                 vm_name)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'vmx_ssh':
            esx_user = params.get("esx_host_user", "root")
            esx_pwd = params.get("esx_host_passwd", "123qweP")
            vmx = params.get('vmx')
            setup_esx_ssh_key(esx_ip, esx_user, esx_pwd)
            try:
                utils_misc.add_identities_into_ssh_agent()
            except Exception:
                process.run("ssh-agent -k")
                raise exceptions.TestError("Fail to setup ssh-agent")
            input_option = '-i vmx -it ssh %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        if checkpoint == 'simulate_nfs':
            simulate_images = params.get("simu_images_path")
            simulate_vms = params.get("simu_vms_path")
            simulate_dom_md = params.get("simu_dom_md_path")
            os.makedirs(simulate_images)
            os.makedirs(simulate_vms)
            process.run('touch %s' % simulate_dom_md)
            process.run('chmod -R 777 /tmp/rhv/')

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
        if v2v_user:
            cmd_export_env = 'export LIBGUESTFS_BACKEND=direct'
            cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd)

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')
            # Only adjust error_flag to 'replace' to avoid a decode
            # exception for RHEL7-84978
            if "guestfish" in cmd:
                error_flag = "replace"

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(command='tail -f /var/log/messages',
                                          output_func=utils_misc.log_line,
                                          output_params=(params['tail_log'], ))
        cmd_result = process.run(cmd,
                                 timeout=v2v_timeout,
                                 verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if mnt_point and os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True,
                                 pool_name='src_pool',
                                 pool_target='v2v_src_pool')
                    cleanup_pool(user_pool=True)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode is None:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if checkpoint in ['with_ic', 'without_ic']:
            process.run(params['cmd_clean_vm'])
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
        if checkpoint == 'simulate_nfs':
            process.run('rm -rf /tmp/rhv/')
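Several checks in this example reduce to pulling an option value out of the virt-v2v command line (`-oa`, `-of`, `-on`) and comparing it against the converted image. A minimal standalone sketch of that pattern, using `qemu-img info` directly instead of the avocado-vt helpers (the command line and image path are illustrative; `-U` would additionally be needed on qemu-img builds with image locking):

import json
import re
import subprocess

def get_v2v_option(cmd, option):
    """Return the first value passed to an option like -oa/-of/-on."""
    match = re.findall(r"%s\s(\w+)" % option, cmd)
    return match[0] if match else None

def image_is_sparse(img_path):
    """A sparse image allocates fewer bytes on disk than its virtual size."""
    out = subprocess.check_output(
        ["qemu-img", "info", "--output=json", img_path])
    info = json.loads(out)
    return info["virtual-size"] > info["actual-size"]

cmd = "virt-v2v -i disk guest.img -o local -os /var/tmp -oa sparse"
if get_v2v_option(cmd, "-oa") == "sparse":
    print("expecting a sparse image after conversion")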
Example #10
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For constructing the rhv-upload option in the v2v command
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('Set up ssh-agent access')
        ssh_key.setup_ssh_key(xen_host,
                              user=xen_host_user,
                              port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sals user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package was not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
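The `multidisk` checkpoint in this example counts source disks by scanning `virsh domblklist` output for lines that contain a path, and later compares the count against `lsblk` inside the guest. A standalone sketch of the host-side parsing, assuming the usual two-column `Target Source` layout (the sample output is illustrative):

def count_disks(domblklist_output):
    """Count devices with a source path in 'virsh domblklist' output."""
    count = 0
    for line in domblklist_output.splitlines():
        # The header and separator lines carry no path
        if '/' in line:
            count += 1
    return count

sample = """Target   Source
------------------------------------------------
vda      /var/lib/libvirt/images/guest.qcow2
vdb      /var/lib/libvirt/images/data.img
"""
assert count_disks(sample) == 2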
Example #11
def run(test, params, env):
    """
    Test command: virsh pool-edit.

    Edit the XML configuration for a storage pool ('dir' type by default).
    1) Edit pool by different methods.
    2) Check the edit result and cleanup env.
    """

    pool_ref = params.get("pool_ref", "name")
    pool_name = params.get("pool_name", "default")
    pool_uuid = params.get("pool_uuid", "")
    status_error = "yes" == params.get("status_error", "no")
    pool_type = params.get("pool_type", "dir")
    pool_target = os.path.join(data_dir.get_tmp_dir(),
                               params.get("pool_target", "pool_target"))
    emulated_image = params.get("emulated_image", "emulated-image-disk")
    edit_target = params.get("edit_target", "target_path")

    pool = pool_name
    if pool_ref == "uuid":
        pool = pool_uuid
    poolxml = pool_xml.PoolXML()
    libvirt_pool = libvirt_storage.StoragePool()
    poolvolume_test = libvirt.PoolVolumeTest(test, params)
    check_pool_name = pool_name
    new_path = ""
    try:
        if not status_error:
            # Always prepare a pool for testing
            poolvolume_test.pre_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            if not pool_uuid and pool_ref == "uuid":
                pool = libvirt_pool.get_pool_uuid(pool_name)
            poolxml.xml = pool_xml.PoolXML().new_from_dumpxml(pool_name).xml
            logging.debug("Before edit pool:")
            poolxml.debug_xml()

            expect_value = ""
            # Test: Edit target path
            if edit_target == "pool_target_path":
                edit_cmd = []
                new_path = os.path.join(data_dir.get_tmp_dir(), "new_path")
                os.mkdir(new_path)
                edit_cmd.append(":%s/<path>.*</<path>" +
                                new_path.replace('/', '\/') + "<")
                pool_target = new_path
                expect_value = new_path
            # Test: Edit disk pool format type:
            elif edit_target == "pool_format_type":
                edit_cmd = []
                new_format_type = params.get("pool_format", "gpt")
                edit_cmd.append(":%s/<format type=.*\/>/<format type='" +
                                new_format_type + "'\/>/")
                expect_value = new_format_type
            # Test: Redefine (delete uuid, edit pool name and target path)
            elif edit_target == "pool_redefine":
                edit_cmd = []
                new_pool_name = params.get("new_pool_name", "new_edit_pool")
                edit_cmd.append(":g/<uuid>/d")
                new_path = os.path.join(data_dir.get_tmp_dir(), "new_pool")
                os.mkdir(new_path)
                edit_cmd.append(":%s/<path>.*</<path>" +
                                new_path.replace('/', '\/') + "<")
                edit_cmd.append(":%s/<name>" + pool_name + "</<name>" +
                                new_pool_name + "<")
                pool_target = new_path
                check_pool_name = new_pool_name

            else:
                test.cancel("No edit method for %s" % edit_target)

            # run test and check the result
            edit_pool(test, pool, edit_cmd)
            if libvirt_pool.is_pool_active(pool_name):
                libvirt_pool.destroy_pool(pool_name)
            # After changing the source format, we have to rebuild the pool
            # to overwrite the disk partition table
            if edit_target == "pool_format_type":
                libvirt_pool.build_pool(pool_name, '--overwrite', debug=True)
            if not libvirt_pool.start_pool(check_pool_name):
                test.fail("Failed to start the pool after editing it")
            if not check_pool(check_pool_name, edit_target, expect_value):
                test.fail("Pool edit check failed")
        else:
            # negative test
            result = virsh.pool_edit(pool)
            libvirt.check_exit_status(result, status_error)
    finally:
        for pool in [pool_name, check_pool_name]:
            if libvirt_pool.pool_exists(pool):
                poolvolume_test.cleanup_pool(pool, pool_type,
                                             pool_target, emulated_image)
        if os.path.exists(new_path):
            os.rmdir(new_path)
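The edit_cmd entries above are vi ':%s' substitutions that virsh pool-edit applies to the pool XML. A rough Python re.sub equivalent of the target-path substitution, shown on a single hypothetical XML line (vi substitutions operate per line, so the greedy '.*<' stops at the last '<' on that line):

import re

line = "    <path>/var/lib/libvirt/images</path>"
new_path = "/tmp/new_path"
# Equivalent of the vi command ":%s/<path>.*</<path>NEW<" built above;
# the greedy match ends at the "<" that opens "</path>".
edited = re.sub(r"<path>.*<", "<path>%s<" % new_path, line)
print(edited)  # "    <path>/tmp/new_path</path>"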
Example #12
0
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in params.itervalues():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    restore_image_owner = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool():
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session":
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool():
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session":
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\sconvert\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            raise exceptions.TestError("Fail to find tmp target file name when"
                                       " converting vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s" %
                                        (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file",
                         expected_vmtype)
        else:
            raise exceptions.TestFail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            raise exceptions.TestError("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a preallocated image"
                                              % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                raise exceptions.TestFail("%s format is not %s"
                                          % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        if output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        if output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise exceptions.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            raise exceptions.TestFail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Found message: %s", init_msg)
        else:
            raise exceptions.TestFail("Message not found: %s" % init_msg)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        raise exceptions.TestFail('Error log line longer than'
                                                  ' 72 characters: %s' % line)
            else:
                error_map = {
                    'conflict_options': ['option used more than once'],
                    'xen_no_output_format': ['The input metadata did not define'
                                             ' the disk format']
                }
                if not utils_v2v.check_log(output, error_map[checkpoint]):
                    raise exceptions.TestFail('Error message not found: %s' %
                                              error_map[checkpoint])
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "allocation", expected_mode)
            if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise exceptions.TestFail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    raise exceptions.TestFail('Output is not empty in quiet mode')

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode in ['libvirtxml', 'ova']:
            raise exceptions.TestNAError("Unsupported input mode: %s" % input_mode)
        else:
            raise exceptions.TestError("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
        output_format = params.get("output_format")
        if output_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise exceptions.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.mkdir(vdsm_domain_dir)
                os.mkdir(vdsm_image_dir)
                os.mkdir(vdsm_vm_dir)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint == 'length_of_error':
            pass
        else:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Change the image owner and group
                ori_owner = os.stat(disk_img).st_uid
                ori_group = os.stat(disk_img).st_gid
                os.chown(disk_img, user_info.pw_uid, user_info.pw_gid)
                restore_image_owner = True
            else:
                raise exceptions.TestNAError("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # If the input format is not defined, we need to either define
            # the original format in the source metadata (xml) or use '-of'
            # to force the output format; see BZ#1141723 for details.
            if '-of' not in v2v_options and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd
        cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                process.system(cmd)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if restore_image_owner:
            os.chown(disk_img, ori_owner, ori_group)
        if backup_xml:
            backup_xml.sync()
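check_image() above classifies an image as sparse or preallocated by comparing its virtual size ('vsize') with the bytes actually allocated on disk ('dsize'). A minimal standalone equivalent using the qemu-img CLI directly (assumes qemu-img is installed; utils_misc.get_image_info() exposes similar fields):

import json
import subprocess

def allocation_mode(img_path):
    # "virtual-size" and "actual-size" are keys in qemu-img's JSON output;
    # a sparse image has more virtual bytes than allocated bytes.
    info = json.loads(subprocess.check_output(
        ["qemu-img", "info", "--output=json", img_path]))
    if info["virtual-size"] > info["actual-size"]:
        return "sparse"
    return "preallocated"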
Example #13
0
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool(except 'disk' type pool
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create a over size vol in pool(expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected: pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' was not found." %
                                 pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            raise error.TestFail("Volume '%s' not found in pool '%s'." %
                                 (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict containing the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                    UUID, State, Persistent, Autostart, Capacity, Allocation,
                    Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # Converting from bytes to GiB can cause a deviation, which
            # should not exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['1M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name,
                                      "--disable",
                                      ignore_status=True)
        utlv.check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory based storage pool is pretty much always active,
        # as is the SCSI pool.
        if pool_type not in ["dir", 'scsi']:
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over size vol in pool(expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For NFS type pools, there's a bug (BZ#1077068) with volume
        # allocation, and glusterfs pools do not support volume creation,
        # so skip them
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name,
                                         vol_capacity, vol_allocation, "raw")
            utlv.check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info['Capacity'])
            check_pool_info(pool_info, "Allocation", new_info['Allocation'])
            check_pool_info(pool_info, "Available", new_info['Available'])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
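The pool 'Capacity'/'Allocation'/'Available' checks above tolerate up to 1% deviation from the byte-to-GiB conversion. A worked example of the is_in_range() formula used by check_pool_info():

def is_in_range(actual, expected, error_percent):
    deviation = 100 - (100 * (float(actual) / float(expected)))
    return float(deviation) <= float(error_percent)

# 19.99 GiB reported against 20.00 GiB expected: 0.05% deviation, accepted.
print(is_in_range(19.99, 20.00, 1))  # True
# 19.00 GiB against 20.00 GiB: 5% deviation, rejected.
print(is_in_range(19.00, 20.00, 1))  # False

Note that an actual value larger than expected yields a negative deviation, so the check always passes in that direction.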
Example #14
0
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options (offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support", "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Failed to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        options = ""
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write data to file
            write_file(file_path)

            # Set length for calculating offset + length in the following
            # funcs get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre and post region digests when offset and length
                are given
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s" %
                          (operation, result.stderr))
            # Compare the change part on volume and file
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed", operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)

            # Create one disk xml and attach it to VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file',
                                          'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()

            # Write 100M data into disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Download volume to local with sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)

            # Check download image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
               download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))

            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Do volume upload, upload sparse file which download last time.
            result = virsh.vol_upload(upload_sparse_file, download_file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
               upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
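digest(), defined earlier in this module, hashes a byte region of a file given an offset and a length, with length 0 meaning 'read to EOF'. A minimal sketch of a helper with that contract (an assumption based on how the test calls it, not the module's actual implementation):

import hashlib

def region_digest(path, offset, length):
    # md5 of file[offset:offset+length]; a length of 0 reads to EOF.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        f.seek(offset)
        data = f.read(length) if length else f.read()
        md5.update(data)
    return md5.hexdigest()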
Example #15
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare the test environment.
    2. Back up the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(),
                                                snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name,
                                               snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name,
                            pool_type,
                            pool_target,
                            emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            ret = virsh.attach_disk(vm_name,
                                    volume,
                                    disk_target,
                                    "--config",
                                    debug=True)
            if ret.exit_status != 0:
                test.error("Attach disk failed: %s" % ret.stderr)

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref,
                                       option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True,
                                       debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip,
                                             remote_user,
                                             remote_pwd,
                                             hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError,
                    aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shut down the VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists may still return True for a
        # short window due to a timing issue, which makes the test fail.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists(
                "/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if the save file exists when --managed-save is used.
        save_exist = os.path.exists(save_file)

        # Check if the volume still exists when --remove-all-storage is used.
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri,
                                    ignore_status=True,
                                    debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefine and redefine the domain from the
            # backup xml, but the undefine test may have undefined the
            # domain already, which can make sync() error out
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type, pool_target,
                                emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            if libvirtd_state == "off" and libvirt_version.version_compare(
                    5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 on, libvirtd is restarted "
                    "and the command should succeed")
            else:
                test.fail("virsh undefine returned an unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
Example #16
0
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Prerequisite:
    The host needs the wwpn and wwnn of a vHBA which is zoned and mapped to
    a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when running parted "
                                       "mklabel: %s" % e)
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when running parted "
                                       "mklabel: %s" % e)
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s', vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                session.close()
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk "
                                               "yet, the error message is: %s"
                                               % dev_attach_status.stderr)
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pools; logical pools
            # are cleaned up below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
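
# mount_and_dd is not shown in this listing; a minimal sketch of such a
# helper (the helper name matches the calls above, while the mount point and
# dd parameters are assumptions) could look like:
#
#     def mount_and_dd(session, mount_disk, mount_point="/mnt"):
#         """Mount the disk in the guest and write to it with dd."""
#         if session.cmd_status("mount %s %s" % (mount_disk, mount_point)):
#             return False
#         cmd = ("dd if=/dev/zero of=%s/testfile bs=1M count=100; sync"
#                % mount_point)
#         return session.cmd_status(cmd) == 0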
def run(test, params, env):
    """
    Test scenarios: virsh blockcommit with relative path

    1) Prepare test environment.
    2) Create relative path backing chain
    3) Do virsh blockcommit
    4) Check result.
    5) Recover the environment
    """
    def check_chain_backing_files(disk_src_file, expect_backing_list):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_list: backing chain lists.
        """
        # Validate source image doesn't have backing files after active blockcommit
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        logging.debug("Actual backing chain from qemu-img: %s\n", qemu_img_info_backing_chain)
        logging.debug("Expected backing chain: %s\n", expect_backing_list)
        if qemu_img_info_backing_chain != expect_backing_list:
            test.fail("The backing chain reported by qemu-img does not match "
                      "the expected backing list")

    def check_top_image_in_xml(expected_top_image):
        """
        Check the top image in the disk source file.

        :param expected_top_image: expected top image
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk.xmltreefile
                break
        logging.debug("disk xml in top: %s\n", disk_xml)
        for attr in ['file', 'name', 'dev']:
            src_file = disk_xml.find('source').get(attr)
            if src_file:
                break
        if src_file not in expected_top_image:
            test.fail("Current top image %s does not match the expected "
                      "one: %s" % (src_file, expected_top_image))

    def check_blockcommit_with_bandwidth(chain_list):
        """
        Check blockcommit with bandwidth

        :param chain_list: list, expected backing chain list
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n", disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
        parse_source_file_list = [elem.find('source').get('file') or elem.find('source').get('name') for elem in backingstore_list]

        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether relative path has been kept
        for i in range(0, len(chain_list)-1):
            if chain_list[i] not in parse_source_file_list[i]:
                test.fail("The relative path parsed from the disk xml "
                          "differs from the expected one")

    def check_file_not_exists(root_dir, file_name, reverse=False):
        """
        Check whether a file exists in a given folder

        :param root_dir: preset root directory
        :param file_name: input file name
        :param reverse: whether to reverse the condition
        """
        files_path = [os.path.join(root_dir, f) for f in os.listdir(root_dir)
                      if os.path.isfile(os.path.join(root_dir, f))
                      ]
        logging.debug("all files in folder: %s \n", files_path)
        if not files_path:
            test.fail("Failed to get snapshot files in preset folder")
        elif reverse:
            if file_name not in files_path:
                test.fail("snapshot file %s cannot be found" % file_name)
        else:
            if file_name in files_path:
                test.fail("snapshot file %s was not deleted" % file_name)

    def check_backing_chain_file_not_exists(disk_src_file, file_name, reverse=False):
        """
        Check whether file exists in source file's backing chain

        :param disk_src_file: disk source with backing chain files
        :param file_name: input file name
        :param reverse: whether to reverse the condition
        """
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file)
        if reverse:
            if file_name not in qemu_img_info_backing_chain:
                test.fail("%s cannot be found in the backing chain" % file_name)
        else:
            if file_name in qemu_img_info_backing_chain:
                test.fail("%s should not be in the backing chain" % file_name)

    def fill_vm_with_contents():
        """ Fill contents in VM """
        logging.info("Filling VM contents...")
        try:
            session = vm.wait_for_login()
            status, output = session.cmd_status_output(
                "dd if=/dev/urandom of=/tmp/bigfile bs=1M count=200")
            logging.info("Fill contents in VM:\n%s", output)
            session.close()
        except Exception as e:
            logging.error(str(e))

    def create_lvm_pool():
        """ create lvm pool"""
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
        pvt.pre_pool(**params)
        capacity = "5G"
        for i in range(1, 5):
            vol_name = 'vol%s' % i
            path = "%s/%s" % (pool_target, vol_name)
            virsh.vol_create_as(vol_name, pool_name, capacity, capacity, "qcow2", debug=True)
            cmd = "qemu-img create -f %s %s %s" % ("qcow2", path, capacity)
            process.run(cmd, ignore_status=False, shell=True)
            volume_path_list.append(path)
            capacity = "2G"

    def setup_iscsi_env():
        """ Setup iscsi environment"""
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("image_size", "10G")
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               portal_ip="127.0.0.1")
        cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
               % ("127.0.0.1", "3260", iscsi_target, lun_num, emulated_size))
        process.run(cmd, shell=True)
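        # qemu-img can address a raw iSCSI LUN directly through the
        # iscsi://<host>:<port>/<target-iqn>/<lun> URL syntax used above.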

        blk_source_image_after_converted = "iscsi://%s:%s/%s/%s" % ("127.0.0.1", "3260", iscsi_target, lun_num)
        # Convert the image from qcow2 to raw
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)

        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_rbd_env():
        """ Set up rbd environment"""
        params.update(
            {"virt_disk_device_target": disk_target,
             "ceph_image_file": first_src_file})
        libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
        ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
        ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
        blk_source_image_after_converted = ("rbd:%s:mon_host=%s" %
                                            (ceph_disk_name, ceph_mon_ip))
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_volume_pool_env():
        """Setup volume pool environment"""
        params.update(
            {"virt_disk_device_target": disk_target})
        create_lvm_pool()

        blk_source_image_after_converted = ("%s" % volume_path_list[0])
        # Convert the image from qcow2 to volume
        convert_disk_cmd = ("qemu-img convert"
                            " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)
        params.update({'disk_source_name': blk_source_image_after_converted,
                       'disk_type': 'block',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params, tmp_dir)
        vm.wait_for_login().close()
        vm.destroy(gracefully=False)
        replace_disk_image, backing_chain_list = libvirt_disk.make_syslink_path_backing_files(
            pre_set_root_dir, volume_path_list, disk_format)
        params.update({'disk_source_name': replace_disk_image,
                       'disk_type': 'file',
                       'disk_format': 'qcow2',
                       'disk_source_protocol': 'file'})
        blk_source_image_after_converted = os.path.join(pre_set_root_dir, syslink_top_img)
        skip_first_one = True
        return replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list

    def validate_blockcommit_after_libvirtd_restart():
        """Validate blockcommit after libvirtd restart"""
        logging.debug("phase three blockcommit .....")
        counts = 1
        phase_three_blockcommit_options = " --active"
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_three_blockcommit_options, counts)
        time.sleep(3)
        # Before restart libvirtd
        mirror_content_before_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_content_before_restart)
        utils_libvirtd.libvirtd_restart()
        # After restart libvirtd
        mirror_content_after_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
        logging.debug(mirror_content_after_restart)
        # Check whether mirror content is identical with previous one
        if mirror_content_before_restart != mirror_content_after_restart:
            test.fail("The mirror part content changed after libvirtd restarted")
        virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)

    def prepare_case_scenarios(snap_del_disks, base_file):
        """
        Prepare case scenarios

        :param snap_del_disks: snapshot list
        :param base_file: base file for snapshot
        """
        index = len(snap_del_disks) - 1
        option = "--top %s --base %s --delete --verbose --wait"
        scenarios = {}
        scenarios.update({"middle-to-middle": {'blkcomopt':
                          option % (snap_del_disks[index - 1], snap_del_disks[index - 2]),
                          'top': snap_del_disks[index - 1],
                          'base': snap_del_disks[index - 2]}})
        scenarios.update({"middle-to-base": {'blkcomopt':
                          option % (snap_del_disks[index - 1], base_file),
                          'top': snap_del_disks[index - 1],
                          'base': base_file}})
        scenarios.update({"top-to-middle": {'blkcomopt':
                          option % (snap_del_disks[index], snap_del_disks[index - 2]) + "  --active",
                          'top': snap_del_disks[index],
                          'base': snap_del_disks[index - 2]}})
        scenarios.update({"top-to-base": {'blkcomopt':
                                          "--top %s --delete --verbose --wait --active --pivot"
                                          % (snap_del_disks[index]),
                                          "top": snap_del_disks[index],
                                          "base": snap_del_disks[index]}})
        scenarios.update({"abort-top-job": {'blkcomopt':
                                            "--top %s --delete --verbose --wait --active --pivot --bandwidth 1"
                                            % (snap_del_disks[index]),
                                            "top": snap_del_disks[index],
                                            "base": snap_del_disks[index]}})
        return scenarios
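
    # Each scenario maps to a single virsh blockcommit invocation; e.g. the
    # "middle-to-middle" entry drives a command of this form (paths are
    # illustrative):
    #   virsh blockcommit <dom> vda --top snap.2 --base snap.1 \
    #         --delete --verbose --wait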

    def loop_case_in_scenarios(scenarios):
        """
        Loop over the case scenarios

        :param scenarios: dict of scenarios
        """
        # loop each scenario
        for case, opt in list(scenarios.items()):
            logging.debug("Begin scenario: %s testing....................", case)
            reverse = False
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            tmp_option = opt.get('blkcomopt')
            top_file = opt.get('top')
            base_file = opt.get('base')
            if 'abort' in case:
                fill_vm_with_contents()
                ignite_blockcommit_thread = threading.Thread(target=virsh.blockcommit,
                                                             args=(vm_name, disk_target, tmp_option,),
                                                             kwargs={'ignore_status': True, 'debug': True})
                ignite_blockcommit_thread.start()
                ignite_blockcommit_thread.join(2)
                virsh.blockjob(vm_name, disk_target, " --abort", ignore_status=False)
                reverse = True
            else:
                libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', tmp_option, 1)
            # Need pivot to make effect
            if "--active" in tmp_option and "--pivot" not in tmp_option:
                virsh.blockjob(vm_name, disk_target, '--pivot', ignore_status=True)
            check_file_not_exists(pre_set_root_dir, top_file, reverse=reverse)
            if 'top' not in case:
                check_backing_chain_file_not_exists(snap_del_disks[len(snap_del_disks) - 1], top_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")

    virsh_dargs = {'debug': True}
    status_error = ("yes" == params.get("status_error", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    validate_delete_option = ("yes" == params.get("validate_delete_option", "no"))

    tmp_dir = data_dir.get_data_dir()
    top_inactive = ("yes" == params.get("top_inactive"))
    base_option = params.get("base_option", "none")
    bandwidth = params.get("blockcommit_bandwidth", "")

    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")

    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    pool_type = params.get("pool_type")
    emulated_image = params.get("emulated_image")
    syslink_top_img = params.get("syslink_top_img")
    snapshot_take = int(params.get("snapshot_take", "4"))
    snapshot_prefix = params.get("snapshot_prefix", "snapshot")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    blk_source_image = os.path.basename(first_src_file)
    pre_set_root_dir = os.path.dirname(first_src_file)

    snapshot_external_disks = []
    skip_first_one = False
    snap_del_disks = []
    volume_path_list = []
    kkwargs = params.copy()
    pvt = libvirt.PoolVolumeTest(test, params)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if existing_snaps:
        test.fail("There are snapshots created for %s already" %
                  vm_name)
    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if disk_src_protocol == 'iscsi':
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_iscsi_env()
        if disk_src_protocol == "rbd":
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_rbd_env()
        if disk_src_protocol == "pool":
            replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list = setup_volume_pool_env()
        libvirt.set_vm_disk(vm, params, tmp_dir)

        # get a vm session before snapshot
        session = vm.wait_for_login()
        old_parts = utils_disk.get_parts_list(session)
        # Check backing files
        check_chain_backing_files(replace_disk_image, backing_chain_list)

        if vm_state == "paused":
            vm.pause()
        # Do phase one blockcommit
        phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
        counts = len(backing_chain_list)
        if bandwidth and base_option == "base":
            phase_one_blockcommit_options = "--top vda[1] --base vda[3] --keep-relative --bandwidth %s --active" % bandwidth
        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()
        if base_option == "shallow":
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        elif base_option == "base":
            counts = 1
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
            check_blockcommit_with_bandwidth(backing_chain_list[::-1])
            virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)
            # Pivot commit down to the bottom of the backing chain
            phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
            counts = len(backing_chain_list)
            libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts)
        # Check top image after phase one block commit
        check_top_image_in_xml(blk_source_image_after_converted)

        # Do snapshots
        _, snapshot_external_disks = libvirt_disk.create_reuse_external_snapshots(
            vm, pre_set_root_dir, skip_first_one, disk_target)
        # Set blockcommit_options
        phase_two_blockcommit_options = "--verbose --keep-relative --shallow --active --pivot"

        # Run phase two blockcommit with snapshots
        counts = len(snapshot_external_disks) - 1
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_two_blockcommit_options, counts)
        # Check top image after phase two block commit
        check_top_image_in_xml(snapshot_external_disks)
        # Run dependent restart_libvirtd case
        if restart_libvirtd:
            validate_blockcommit_after_libvirtd_restart()
        # Run dependent validate_delete_option case
        if validate_delete_option:
            # Run blockcommit with snapshots to validate delete option
            # Test scenarios can be referred from https://bugzilla.redhat.com/show_bug.cgi?id=1008350
            logging.debug("Blockcommit with delete option .....")
            base_file = first_src_file
            # Get first attempt snapshot lists
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take)
            scenarios = prepare_case_scenarios(snap_del_disks, base_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]
            loop_case_in_scenarios(scenarios)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Delete reused external disks if they exist
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Delete snapshot disk
        libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
        # Clean up created folders
        for folder in [chr(letter) for letter in range(ord('a'), ord('a') + 4)]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)

        # Remove ceph config file if created
        if disk_src_protocol == "rbd":
            libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)
        elif disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'pool':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
            rm_cmd = "rm -rf %s" % pool_target
            process.run(rm_cmd, shell=True)

        # Recover images' xattr if any are left
        dirty_images = libvirt_disk.get_images_with_xattr(vm)
        if dirty_images:
            libvirt_disk.clean_images_with_xattr(dirty_images)
            test.error("VM's image(s) having xattr left")
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the vm.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs is not supported "
                                    "in the current version. Check more info "
                                    "with https://bugzilla.redhat.com/"
                                    "buglist.cgi?bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path where libvirt keeps the snapshot configs for this domain
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create a destination pool holding the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes through
                # libvirt; a logical pool can create volumes via libvirt, but
                # the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block devices
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name,
                                       source=img_path,
                                       target="vdf",
                                       extra=extra,
                                       debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." %
                                        (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs
                          and disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for
                        # block type disks. As a block device is treated as
                        # raw format by default, it is not suitable as an
                        # external disk snapshot target. A workaround is to
                        # run qemu-img again on the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this "
                            "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in the future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(
                    vm_name, snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()
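        # NamedTemporaryFile is deleted on close; only its unique path is
        # kept and reused for the file created inside the guest below.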

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # Only internal snapshot revert works for now, so do the revert only
        # for internal snapshots; the skipped external cases still count as
        # pass. Once external revert is supported, move the following code
        # out of this branch.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name,
                                                  snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        "revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail(
                        "Revert command succeeded, but VM is not "
                        "paused after reverting with the --paused "
                        "option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert: tmp_file was created after the
            # snapshot, so it must be gone after a successful revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file still exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now. Only do this when
        # snapshot creation succeeded, which is filtered in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still" %
                                                 snap_xml_path + " exist")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name,
                                 pool_type,
                                 pool_target,
                                 emulated_image,
                                 source_name=vol_name)
            except error.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
Example #19
def run(test, params, env):
    """
    Test virsh vol-create and vol-create-as command to cover the following matrix:
    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]

    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]

    pool_type = [logical]
    volume_format = [none]

    pool_type = [iscsi, scsi]
    Not supported with format type

    TODO:
    pool_type = [rbd, glusterfs]

    Reference: http://www.libvirt.org/storage.html
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_pool_format = params.get("src_pool_format", "")
    pool_vol_num = int(params.get("src_pool_vol_num", '1'))
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    prefix_vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format", "raw")
    vol_capacity = params.get("vol_capacity", 1048576)
    vol_allocation = params.get("vol_allocation", 1048576)
    image_size = params.get("emulate_image_size", "1G")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    by_xml = "yes" == params.get("create_vol_by_xml", "yes")
    incomplete_target = "yes" == params.get("incomplete_target", "no")
    luks_encrypted = "luks" == params.get("encryption_method")
    encryption_secret_type = params.get("encryption_secret_type", "passphrase")
    virsh_readonly_mode = 'yes' == params.get("virsh_readonly", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")
        if incomplete_target:
            test.cancel("Generating the target path is not supported"
                        " in the current libvirt version.")

    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        test.cancel("pool type %s not in supported type list: %s" %
                    (src_pool_type, pool_type))

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be claimed by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Build the volume xml attribute dictionary from all params starting with
    # 'vol_'; 'lazy_refcounts' is handled separately below.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts
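    # As an illustration (not from a real run), vol_arg might now look like:
    # {'capacity': 1048576, 'allocation': 1048576, 'format': 'qcow2',
    #  'lazy_refcounts': False}; 'name' is filled in per volume below.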

    def create_luks_secret(vol_path):
        """
        Create secret for luks encryption
        :param vol_path: volume path.
        :return: secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        utlv.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as detail:
            test.error("Failed to get newly created secret uuid: %s" % detail)
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            'redhat'.encode(encoding)).decode(encoding)
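        # e.g. b64encode(b'redhat') yields 'cmVkaGF0'; virsh secret-set-value
        # expects the secret value base64-encoded like this.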
        ret = virsh.secret_set_value(encryption_uuid, secret_string)
        utlv.check_exit_status(ret)

        return encryption_uuid

    def post_process_vol(ori_vol_path):
        """
        Create or deactivate a volume without libvirt

        :param ori_vol_path: Full path of an original volume
        :return: Volume name for checking
        """
        process_vol_name = params.get("process_vol_name", "process_vol")
        process_vol_options = params.get("process_vol_options", "")
        process_vol_capacity = params.get("process_vol_capacity", vol_capacity)
        process_vol_cmd = ""
        unsupport_err = "Unsupported operation '%s %s' in this test" % (
            process_vol_by, process_vol_type)
        if process_vol_by == "lvcreate":
            process_vol_cmd = "lvcreate -L %s " % process_vol_capacity
            if process_vol_type == "thin":
                if not process_vol_options:
                    process_vol_options = "-T "
                process_vol_cmd += "%s " % process_vol_options
                processthin_pool_name = params.get("processthin_pool_name",
                                                   "thinpool")
                processthin_vol_name = params.get("processthin_vol_name",
                                                  "thinvol")
                process_vol_capacity = params.get("process_vol_capacity", "1G")
                process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path),
                                               processthin_pool_name)
                process_vol_cmd += "-V %s " % process_vol_capacity
                process_vol_cmd += "-n %s " % processthin_vol_name
                process_vol_name = processthin_vol_name
            elif process_vol_type == "snapshot":
                if not process_vol_options:
                    process_vol_options = "-s "
                process_vol_cmd += "%s " % process_vol_options
                process_vol_cmd += "-n %s " % process_vol_name
                process_vol_cmd += "%s " % (ori_vol_path)
            else:
                logging.error(unsupport_err)
                return
        elif process_vol_by == "qemu-img" and process_vol_type == "create":
            process_vol_cmd = "qemu-img create "
            process_vol_path = os.path.dirname(ori_vol_path) + "/"
            process_vol_path += process_vol_name
            process_vol_cmd += "%s " % process_vol_options
            process_vol_cmd += "%s " % process_vol_path
            process_vol_cmd += "%s " % process_vol_capacity
        elif process_vol_by == "lvchange" and process_vol_type == "deactivate":
            process_vol_cmd = "lvchange %s " % ori_vol_path
            if not process_vol_options:
                process_vol_options = "-an"
            process_vol_cmd += process_vol_options
        else:
            logging.error(unsupport_err)
            return
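        # At this point process_vol_cmd is a complete shell command; for a
        # snapshot volume it looks roughly like
        # "lvcreate -L 1048576 -s -n process_vol /dev/<vg>/<lv>"
        # (an illustrative sketch, not captured from a real run).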
        rst = process.run(process_vol_cmd, ignore_status=True, shell=True)
        if rst.exit_status:
            if "Snapshots of snapshots are not supported" in rst.stderr_text:
                logging.debug("%s is already a snapshot volume", ori_vol_path)
                process_vol_name = os.path.basename(ori_vol_path)
            else:
                logging.error(rst.stderr_text)
                return
        return process_vol_name

    def check_vol(pool_name, vol_name, expect_exist=True):
        """
        Check volume vol_name in pool pool_name
        """
        src_volumes = src_pv.list_volumes().keys()
        logging.debug("Current volumes in %s: %s", pool_name, src_volumes)
        if expect_exist:
            if vol_name not in src_volumes:
                test.fail("Can't find volume %s in pool %s" %
                          (vol_name, pool_name))
            # check format in volume xml
            volxml = libvirt_xml.VolXML()
            post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name)
            logging.debug("Volume %s XML: %s" %
                          (vol_name, post_xml.xmltreefile))
            if 'format' in post_xml.keys() and vol_format is not None:
                if post_xml.format != vol_format:
                    test.fail("Volume format %s is not expected" % vol_format +
                              " as defined.")
        else:
            if vol_name in src_volumes:
                test.fail("Find volume %s in pool %s, but expect not" %
                          (vol_name, pool_name))

    fmt_err0 = "Unknown file format '%s'" % vol_format
    fmt_err1 = "Formatting or formatting option not "
    fmt_err1 += "supported for file format '%s'" % vol_format
    fmt_err2 = "Driver '%s' does not support " % vol_format
    fmt_err2 += "image creation"
    fmt_err_list = [fmt_err0, fmt_err1, fmt_err2]
    skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format
    vol_path_list = []
    secret_uuids = []
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name,
                     src_pool_type,
                     src_pool_target,
                     src_emulated_image,
                     image_size=image_size,
                     source_format=src_pool_format)

        src_pv = libvirt_storage.PoolVolume(src_pool_name)
        src_pool_uuid = libvirt_storage.StoragePool().pool_info(
            src_pool_name)['UUID']
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())
        # Create volumes by virsh in a loop
        while pool_vol_num > 0:
            # Set volume xml file
            vol_name = prefix_vol_name + "_%s" % pool_vol_num
            bad_vol_name = params.get("bad_vol_name", "")
            if bad_vol_name:
                vol_name = bad_vol_name
            pool_vol_num -= 1
            # disk partition for new volume
            if src_pool_type == "disk":
                vol_name = utlv.new_disk_vol_name(src_pool_name)
                if vol_name is None:
                    test.error("Fail to generate volume name")
            if by_xml:
                # According to BZ#1138523, we need to inspect the right name
                # (disk partition) for the new volume
                if src_pool_type == "disk":
                    vol_name = utlv.new_disk_vol_name(src_pool_name)
                    if vol_name is None:
                        test.error("Fail to generate volume name")
                vol_arg['name'] = vol_name
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                if luks_encrypted:
                    # For luks encrypted disk, add related xml in newvol
                    luks_encryption_params = {}
                    luks_encryption_params.update({"format": "luks"})
                    luks_secret_uuid = create_luks_secret(
                        os.path.join(src_pool_target, vol_name))
                    secret_uuids.append(luks_secret_uuid)
                    luks_encryption_params.update({
                        "secret": {
                            "type": encryption_secret_type,
                            "uuid": luks_secret_uuid
                        }
                    })
                    newvol.encryption = volxml.new_encryption(
                        **luks_encryption_params)
                vol_xml = newvol['xml']
                if params.get('setup_libvirt_polkit') == 'yes':
                    process.run("chmod 666 %s" % vol_xml,
                                ignore_status=True,
                                shell=True)
                    if luks_encrypted and libvirt_version.version_compare(
                            4, 5, 0):
                        try:
                            polkit = test_setup.LibvirtPolkitConfig(params)
                            polkit_rules_path = polkit.polkit_rules_path
                            with open(polkit_rules_path, 'r+') as f:
                                rule = f.readlines()
                                for index, v in enumerate(rule):
                                    if v.find("secret") >= 0:
                                        nextline = rule[index + 1]
                                        s = nextline.replace(
                                            "QEMU", "secret").replace(
                                                "pool_name",
                                                "secret_uuid").replace(
                                                    "virt-dir-pool",
                                                    "%s" % luks_secret_uuid)
                                        rule[index + 1] = s
                                rule = ''.join(rule)
                            with open(polkit_rules_path, 'w+') as f:
                                f.write(rule)
                            logging.debug(rule)
                            polkit.polkitd.restart()
                        except IOError as e:
                            logging.error(e)
                # Run virsh_vol_create to create vol
                logging.debug("Create volume from XML: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(
                    src_pool_name,
                    vol_xml,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    ignore_status=True,
                    debug=True)
            else:
                # Run virsh_vol_create_as to create_vol
                pool_name = src_pool_name
                if params.get("create_vol_by_pool_uuid") == "yes":
                    pool_name = src_pool_uuid
                cmd_result = virsh.vol_create_as(
                    vol_name,
                    pool_name,
                    vol_capacity,
                    vol_allocation,
                    vol_format,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    readonly=virsh_readonly_mode,
                    ignore_status=True,
                    debug=True)
            # Check result
            try:
                utlv.check_exit_status(cmd_result, status_error)
                check_vol(src_pool_name, vol_name, not status_error)
                if bad_vol_name:
                    pattern = "volume name '%s' cannot contain '/'" % vol_name
                    logging.debug("pattern: %s", pattern)
                    if "\\" in pattern and by_xml:
                        pattern = pattern.replace("\\", "\\\\")
                    if re.search(pattern, cmd_result.stderr) is None:
                        test.fail("vol-create failed with unexpected reason")
                if not status_error:
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    logging.debug("Full path of %s: %s", vol_name, vol_path)
                    vol_path_list.append(vol_path)
            except exceptions.TestFail as detail:
                stderr = cmd_result.stderr
                if any(err in stderr for err in fmt_err_list):
                    test.cancel(skip_msg)
                else:
                    test.fail("Create volume fail:\n%s" % detail)
        # Post process vol by other programs
        process_vol_by = params.get("process_vol_by")
        process_vol_type = params.get("process_vol_type", "")
        expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes")
        if process_vol_by and vol_path_list:
            process_vol = post_process_vol(vol_path_list[0])
            if process_vol is not None:
                try:
                    virsh.pool_refresh(src_pool_name, ignore_status=False)
                    check_vol(src_pool_name, process_vol, expect_vol_exist)
                except (process.CmdError, exceptions.TestFail) as detail:
                    if process_vol_type == "thin":
                        logging.error(str(detail))
                        test.cancel("You may encounter bug BZ#1060287")
                    else:
                        test.fail("Fail to refresh pool:\n%s" % detail)
            else:
                test.fail("Post process volume failed")
    finally:
        # Cleanup
        # For old lvm2 versions (2.02.106 or earlier), deactivating the volume
        # group (destroying the libvirt logical pool) fails if it contains a
        # deactivated lv snapshot, so activate it manually before destroying
        # the pool.
        if src_pool_type == 'logical' and vol_path_list:
            vg_name = vol_path_list[0].split('/')[2]
            process.run("lvchange -ay %s" % vg_name, shell=True)
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
Example #20
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add volume shrink test after libvirt upstream supports it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        pool_rename_times = 0
        # Rename the pool if one with the given name exists; rename at most 5 times
        while libv_pool.pool_exists(pool_name) and pool_rename_times < 5:
            logging.debug("Pool '%s' already exist.", pool_name)
            pool_name = pool_name + "_t"
            logging.debug("Using a new name '%s' to define pool.", pool_name)
            pool_rename_times += 1
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name, pool_type, pool_target,
                              emulated_image, emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity; '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]

        # Create a volume
        libv_pvt.pre_vol(vol_name=vol_name,
                         vol_format=vol_format,
                         capacity=vol_capacity,
                         allocation=None,
                         pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name)

        # The volume size may not be exactly what we expect after resize, e.g.:
        # 1) with vol_new_capacity = 1b and the --delta option, the volume
        #    size will not change;
        # 2) with vol_new_capacity = 1KB and the --delta option, the volume
        #    size will increase by 1024, not 1000.
        # So volume size checking after resize can be disabled.
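        # For illustration, case 2) corresponds to a command roughly like
        # "virsh vol-resize <vol> 1KB <pool> --delta" (a sketch; the real
        # invocation below goes through virsh.vol_resize()).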
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name,
                                  vol_new_capacity,
                                  pool_name,
                                  resize_option,
                                  uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stdout.strip())
            else:
                if check_vol_info(libv_vol, vol_name, expect_info):
                    logging.debug("Volume %s resize check pass.", vol_name)
                else:
                    raise error.TestFail("Volume %s resize check fail." %
                                         vol_name)
        elif result.exit_status == 0:
            raise error.TestFail("Expect resize fail but run successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
        except error.TestFail as detail:
            logging.error(str(detail))
Example #21
def run(test, params, env):
    """
    Convert a specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.error('Missing command: virt-v2v')
    # Guest name might be changed, so we need a new variable to save the
    # original name
    vm_name = params['original_vm_name'] = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    input_mode = params.get('input_mode')
    input_file = params.get('input_file')
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    os_pool = output_storage = params.get('output_storage', 'default')
    bridge = params.get('bridge')
    network = params.get('network')
    address_cache = env.get('address_cache')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_virsh_pre_conn = params.get('skip_virsh_pre_conn', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    checkpoint = params.get('checkpoint', '')
    datastore = params.get('datastore')
    esxi_host = params.get('esx_hostname')
    esxi_password = params.get('esxi_password')
    hypervisor = params.get("hypervisor")
    input_transport = params.get("input_transport")
    vmx_nfs_src = params.get("vmx_nfs_src")
    # For constructing the rhv-upload option in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    vpx_dc = params.get("vpx_dc")
    vpx_hostname = params.get("vpx_hostname")
    vpx_password = params.get("vpx_password")
    src_uri_type = params.get('src_uri_type')
    v2v_opts = ('-v -x' if params.get('v2v_debug', 'on')
                in ['on', 'force_on'] else '')
    if params.get('v2v_opts'):
        # Force a separating blank between options
        v2v_opts += ' ' + params.get("v2v_opts")
    error_list = []

    # Create a different sasl_user name for each job
    if output_mode == 'rhev':
        params.update({
            'sasl_user':
            params.get("sasl_user") + utils_misc.generate_random_string(3)
        })
        logging.info('sasl user name is %s' % params.get("sasl_user"))
        if output_method == 'rhv_upload':
            # Create password file for '-o rhv_upload' to connect to ovirt
            with open(rhv_passwd_file, 'w') as f:
                f.write(rhv_passwd)
            # Copy ca file from ovirt to local
            remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                   ovirt_engine_passwd, ovirt_ca_file_path,
                                   local_ca_file_path)

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_BSOD():
        """
        Check if the guest boots into a BSOD
        """
        bar = 0.999
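        # Screenshots with a histogram similarity above this bar are treated
        # as matching the known BSOD image.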
        match_img = params.get('image_to_match')
        screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir()
        if match_img is None:
            test.error('No BSOD screenshot to match!')
        cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"'
        if process.run(cmd_man_page, shell=True).exit_status != 0:
            log_fail('Man page does not contain boot failure msg')
        for i in range(100):
            virsh.screenshot(vm_name, screenshot)
            similar = ppm_utils.image_histogram_compare(screenshot, match_img)
            if similar > bar:
                logging.info('Meet BSOD with similarity %s' % similar)
                return
            time.sleep(1)
        log_fail('No BSOD as expected')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        def vm_check():
            """
            Checking the VM
            """
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # Create vmchecker before virsh.start so that the vm can be
            # undefined if it fails to start.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                if checkpoint != 'win2008r2_ostk':
                    ret = vmchecker.run()
                    if len(ret) == 0:
                        logging.info("All common checkpoints passed")
                if checkpoint == 'win2008r2_ostk':
                    check_BSOD()
                # Merge 2 error lists
                error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            vm_check()
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'main_vm': vm_name,
            'target': target,
            'v2v_opts': v2v_opts,
            'os_storage': output_storage,
            'network': network,
            'bridge': bridge,
            'input_mode': input_mode,
            'input_file': input_file,
            'new_name': 'ova_vm_' + utils_misc.generate_random_string(3),
            'datastore': datastore,
            'esxi_host': esxi_host,
            'esxi_password': esxi_password,
            'input_transport': input_transport,
            'vmx_nfs_src': vmx_nfs_src,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'os_pool': os_pool,
            'rhv_upload_opts': rhv_upload_opts,
            'params': params
        }
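        # v2v_params is consumed by utils_v2v.v2v_cmd() below, which builds
        # and runs the actual virt-v2v command line from these keys.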
        if input_mode == 'vmx':
            v2v_params.update({
                'new_name':
                vm_name + utils_misc.generate_random_string(3),
                'hypervisor':
                hypervisor,
                'vpx_dc':
                vpx_dc,
                'password':
                vpx_password if src_uri_type != 'esx' else esxi_password,
                'hostname':
                vpx_hostname,
                'skip_virsh_pre_conn':
                skip_virsh_pre_conn
            })
        # copy ova from nfs storage before v2v conversion
        if input_mode == 'ova':
            src_dir = params.get('ova_dir')
            dest_dir = params.get('ova_copy_dir')
            logging.info('Copy ova from %s to %s', src_dir, dest_dir)
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            else:
                logging.debug('%s already exists, Skip copying' % dest_dir)
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()

        if checkpoint == 'ova_relative_path':
            logging.debug('Current dir: %s', os.getcwd())
            ova_dir = params.get('ova_dir')
            logging.info('Change to dir: %s', ova_dir)
            os.chdir(ova_dir)

        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        if checkpoint == 'permission':
            os.environ['LIBGUESTFS_BACKEND'] = ''
        process.run('echo $LIBGUESTFS_BACKEND', shell=True)

        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        if input_mode == 'ova' and os.path.exists(dest_dir):
            shutil.rmtree(dest_dir)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if input_mode == 'vmx' and input_transport == 'ssh':
            process.run("ssh-agent -k")
Example #22
def run(test, params, env):
    """
    Test DAC in save/restore domain to nfs pool.

    (1).Init variables for test.
    (2).Create nfs pool
    (3).Start VM and check result.
    (4).Save domain to the nfs pool.
    (5).Restore domain from the nfs file.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_save_restore_host_selinux",
                               "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool file variables
    pre_file = "yes" == params.get("pre_file", "yes")
    pre_file_name = params.get("pre_file_name", "dac_nfs_file")
    file_tup = ("file_user", "file_group", "file_mode")
    file_val = []
    for i in file_tup:
        try:
            file_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    file_user, file_group, file_mode = file_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if vm.is_alive():
        vm.destroy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)
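    # backup_labels_of_disks maps each disk path to its original "uid:gid"
    # label, e.g. {"/var/lib/libvirt/images/vm1.qcow2": "107:107"}
    # (illustrative values).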

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown the domain disk to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)
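                # 107:107 is the conventional uid:gid of the 'qemu'
                # user/group on Red Hat based hosts.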

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for save/restore
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     emulated_image,
                     image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # Set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Create a file on nfs server dir.
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        server_file_path = os.path.join(nfs_path, pre_file_name)
        if pre_file and not os.path.exists(server_file_path):
            open(server_file_path, 'a').close()
        if not pre_file and os.path.exists(server_file_path):
            raise error.TestNAError("File %s already exist in pool %s" %
                                    (server_file_path, pool_name))

        # Get nfs mount file path
        mnt_path = os.path.join(tmp_dir, pool_target)
        mnt_file_path = os.path.join(mnt_path, pre_file_name)

        # Change img ownership and mode on nfs server dir
        if pre_file:
            os.chown(server_file_path, file_user, file_group)
            os.chmod(server_file_path, file_mode)

        # Start VM.
        try:
            vm.start()
            # Start VM successfully.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            raise error.TestFail("Domain failed to start, error: %s" % e)

        label_before = check_ownership(server_file_path)
        if label_before:
            logging.debug("file ownership on nfs server before save: %s" %
                          label_before)

        # Save domain to nfs pool file
        save_re = virsh.save(vm_name, mnt_file_path, debug=True)
        if save_re.exit_status:
            if not status_error:
                raise error.TestFail("Failed to save domain to nfs pool file.")
        else:
            if status_error:
                raise error.TestFail("Save domain to nfs pool file succeeded, "
                                     "expected Fail.")

        label_after = check_ownership(server_file_path)
        if label_after:
            logging.debug("file ownership on nfs server after save: %s" %
                          label_after)

        # Restore domain from the nfs pool file
        if not save_re.exit_status:
            restore_re = virsh.restore(mnt_file_path, debug=True)
            if restore_re.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to restore domain from nfs "
                                         "pool file.")
            else:
                if status_error:
                    raise error.TestFail("Restore domain from nfs pool file "
                                         "succeeded, expected Fail.")

            label_after_rs = check_ownership(server_file_path)
            if label_after_rs:
                logging.debug(
                    "file ownership on nfs server after restore: %s" %
                    label_after_rs)
Example #23
def run(test, params, env):
    """
    Convert a specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.error('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = ('-v -x' if params.get('v2v_debug', 'on')
                in ['on', 'force_on'] else '')
    if params.get("v2v_opts"):
        # Force a separating blank between options
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_vm_check = params.get('skip_vm_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = [
        'floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk',
        'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt',
        'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
        'listen_none', 'listen_socket', 'only_net', 'only_br'
    ]
    error_list = []

    # For constructing the rhv-upload option in the v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # For getting the ca.crt file from the ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Preparation steps for different hypervisors
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")
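        # Presumably needed so that older ESX/Xen hosts, which may still use
        # legacy crypto algorithms (e.g. SHA-1), can be connected to.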

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """
        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}',
                                 v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)',
                                     str_i)
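            # kernel_vers would look like e.g. ['3.10.0-1160.el7',
            # '3.10.0-1160.el7', '3.10.0-1160.el7'] (illustrative values) for
            # the kernel, vmlinuz and initramfs entries of one boot stanza.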
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should all
            # be the same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions do not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines(
        )
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
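        # 'rpm -q kernel' output lines look like
        # 'kernel-4.18.0-305.el8.x86_64' (hypothetical version), hence the
        # 'kernel-' prefix is stripped below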
        if current_kernel.strip() != kernel_normal_list[-1][len('kernel-'):]:
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
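        """
        Attach a removable media device (cdrom, floppy or disk) to the VM
        """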
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': type,
            'targetbus': bus[type]
        }
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
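        # The assembled options look like (hypothetical, for type='cdrom'):
        #   ' --driver qemu --subdriver raw --sourcetype file --type cdrom'
        #   ' --targetbus ide --mode readonly --current'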
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
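        # e.g. dest='sata' maps to prefix 's', so targets become
        # 'sda', 'sdb', ... in order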
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node

        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network model was not converted to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add a floppy entry to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
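        # e.g. (hypothetical): '/dev/vda2: UUID="..." TYPE="swap"'
        # gives path '/dev/vda2'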
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('%s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for cmd_i in cmd:
                session.cmd(cmd_i)
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            key_map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % key_map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the free space of the root filesystem
        becomes less than $left_space MB
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                     (large_file, avail - left_space + 2)
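        # e.g. (hypothetical numbers): avail=1000M, left_space=800 gives
        # count=202, leaving roughly 798M free after the dd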
        session.cmd(cmd_create, timeout=v2v_timeout)
        new_avail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New available space: %sM' % new_avail)
        return large_file

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* files exist, remove them, then touch __db.001 to
        # corrupt the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Corrupting rpm db failed')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set SELinux status of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            LOG.info('Set SELinux status with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
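        # For the sample status above this reduces to
        # 'Active: active (running)'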
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2 mins to sync the clock;
            # setting the timeout to 300s has no side effects on the other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
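        # e.g. (hypothetical): 'Last offset     : +0.000123 seconds'
        # gives a drift of 0.000123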
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv removes all removable devices (floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('SELinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('SELinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save origin graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        backup_xml = None
        # Only kvm guest's xml needs to be backup currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.',
                              params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        [
                            video.attrib.pop(attr_i)
                            for attr_i in ['ram', 'vgamem']
                            if attr_i in video.attrib
                        ]
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Set service chronyd on')
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server
            ]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server, 'chronyc waitsync'
            ]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(vm_name,
                                   'network default --current',
                                   debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attach bridge')
            virsh.attach_interface(vm_name,
                                   'bridge virbr0 --current',
                                   debug=True)
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not with file image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
Example #24
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if 'EXAMPLE' in unpri_user:
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to file
            write_file(file_path)

            # Set length for calculating offset + length in the following
            # functions get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get digests of the pre and post regions around offset/length
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path,
                            ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name,
                                      file_path,
                                      options,
                                      unprivileged_user=unpri_user,
                                      uri=uri,
                                      debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path,
                            ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name,
                                        file_path,
                                        options,
                                        unprivileged_user=unpri_user,
                                        uri=uri,
                                        debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            test.fail("Fail to %s volume: %s" % (operation, result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            test.fail("file digests do not match, volume %s failed" %
                      operation)

    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
Example #25
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.error('Missing command: virt-v2v')
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    input_mode = params.get('input_mode')
    input_file = params.get('input_file')
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    output_storage = params.get('output_storage', 'default')
    bridge = params.get('bridge')
    network = params.get('network')
    address_cache = env.get('address_cache')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_check = 'yes' == params.get('skip_check', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    checkpoint = params.get('checkpoint', '')
    error_list = []

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_BSOD():
        """
        Check if the guest boots up into BSOD
        """
        bar = 0.999
        match_img = params.get('image_to_match')
        screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir()
        if match_img is None:
            test.error('No BSOD screenshot to match!')
        cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"'
        if process.run(cmd_man_page, shell=True).exit_status != 0:
            log_fail('Man page does not contain boot failure msg')
        for i in range(100):
            virsh.screenshot(vm_name, screenshot)
            similar = ppm_utils.image_histogram_compare(screenshot, match_img)
            if similar > bar:
                logging.info('Meet BSOD with similarity %s' % similar)
                return
            time.sleep(1)
        log_fail('No BSOD as expected')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if skip_check:
            logging.info('Skip checking vm after conversion')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                if checkpoint != 'win2008r2_ostk':
                    ret = vmchecker.run()
                    if len(ret) == 0:
                        logging.info("All common checkpoints passed")
                if checkpoint == 'win2008r2_ostk':
                    check_BSOD()
                # Merge 2 error lists
                error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))
Example #26
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create a over size vol in pool(expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    iscsi_initiator = params.get("iscsi_initiator", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn",
                                                  "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if pool_type == "iscsi-direct":
            test.cancel("iSCSI-direct pool is not supported in current"
                        "libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            test.fail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            test.fail("Not find volume '%s' in pool '%s'." %
                      (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
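        """
        Return True if actual deviates from expected by no more than
        error_percent (values above expected always pass)
        """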
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict, available values: Name,
                    UUID, State, Persistent, Autostart, Capacity, Allocation,
                    Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # As from bytes to GiB, could cause deviation, and it should not
            # exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target,
        'iscsi_initiator': iscsi_initiator
    }
    params.update(kwargs)

    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(**params)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
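            # Resulting source XML gains (hypothetical iqn value):
            #   <initiator><iqn name='iqn.2018-07...'/></initiator>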
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only apply to fs/disk/logical pools
            # disk/fs pool: as the prepare step already makes a label and
            #               creates a filesystem on the disk, '--overwrite'
            #               is necessary
            # logical_pool: build pool will fail if VG already exist, BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name,
                                          build_option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name,
                                      debug=True,
                                      ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name,
                                          "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory based storage pool is pretty much always active,
            # as is the SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over size vol in pool(expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # Skip netfs pools: NFS has a volume allocation bug (BZ#1077068),
            # and glusterfs pools do not support volume creation
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation,
                                             "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                    result = virsh.pool_delete(pool_name, ignore_status=True)
                    utlv.check_exit_status(result)
                    option = "--inactive --type %s" % pool_type
                    check_pool_list(pool_name, option)
                    if os.path.exists(pool_target):
                        test.fail("The target path '%s' still exist." %
                                  pool_target)
                        result = virsh.pool_start(pool_name,
                                                  ignore_status=True)
                        utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
                result = virsh.pool_undefine(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(**params)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
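
The numbered steps above walk one full storage-pool lifecycle. As a quick reference, here is a minimal standalone sketch of the same define/start/autostart/destroy/undefine flow, assuming avocado-vt's virsh module is importable; "pool.xml" and "demo_pool" are illustrative names, not taken from the test:

from virttest import virsh

result = virsh.pool_define("pool.xml", debug=True)
assert not result.exit_status, result.stderr
# dir pools need no explicit build; fs/disk pools may need '--overwrite'
virsh.pool_start("demo_pool", ignore_status=True)
virsh.pool_autostart("demo_pool", ignore_status=True)
# ... exercise the pool ...
virsh.pool_destroy("demo_pool")
virsh.pool_undefine("demo_pool", ignore_status=True)
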
Example #27
0
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get image ownership and permission variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disks to the configured qemu user to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create the destination pool for the attach-volume image
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create an image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestNAError("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
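
Note: the check_ownership helper called above is not included in this excerpt. A plausible minimal reconstruction, assuming it just returns the "uid:gid" string of a path (an assumption, not the suite's actual code):

import os

def check_ownership(path):
    # Hypothetical helper: return "uid:gid" for path, or None if the
    # path cannot be stat'ed (e.g. on a root-squashed NFS export).
    try:
        stat_re = os.stat(path)
        return "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
    except OSError:
        return None
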
Example #28
0
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """
    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            test.fail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {
            'B': 'B',
            'bytes': 'B',
            'b': 'B',
            'kib': 'K',
            'KiB': 'K',
            'K': 'K',
            'k': 'K',
            'KB': 'K',
            'mib': 'M',
            'MiB': 'M',
            'M': 'M',
            'm': 'M',
            'MB': 'M',
            'gib': 'G',
            'GiB': 'G',
            'G': 'G',
            'g': 'G',
            'GB': 'G',
            'Gb': 'G',
            'tib': 'T',
            'TiB': 'T',
            'TB': 'T',
            'T': 'T',
            't': 'T'
        }
        val = {
            'B': 1,
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            test.fail("Error in parsing capacity value in" " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            test.fail("Error in parsing capacity value " "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(
            expected['name'], expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error(
                "Volume key mismatch for volume: %s\n"
                "Key from xml: %s\nKey from command: %s", expected['name'],
                volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-key for volume: %s successfully"
                " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error(
                "Volume name mismatch\n"
                "Expected name: %s\nOutput of vol-name: %s", expected['name'],
                get_vol_name)
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected path: %s\nOutput of vol-path: %s\n",
                expected['name'], expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug(
                "virsh vol-path for volume: %s successfully checked"
                " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error(
                "Volume path mismatch for volume:%s\n"
                "Expected Path: %s\nPath from virsh vol-list: %s",
                expected['name'], expected['path'], actual_list['path'])
            error_count += 1
        else:
            logging.debug(
                "Path of volume: %s from virsh vol-list "
                "successfully checked against created "
                "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error(
                "Volume path mismatch for volume: %s\n"
                "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug(
                "Path of volume: %s from virsh vol-dumpxml "
                "successfully checked against created volume path",
                expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-list: %s", expected['name'],
                expected['type'], actual_list['type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-list "
                "successfully checked against the created "
                "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error(
                "Volume type mismatch for volume: %s\n"
                "Expected Type: %s\n Type from vol-info: %s", expected['name'],
                expected['type'], actual_info['Type'])
            error_count += 1
        else:
            logging.debug(
                "Type of volume: %s from virsh vol-info successfully"
                " checked against the created volume type", expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error(
                "Volume name mismatch for volume: %s\n"
                "Expected name: %s\n Name from vol-info: %s", expected['name'],
                expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug(
                "Name of volume: %s from virsh vol-info successfully"
                " checked against the created volume name", expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from qemu-img info: %s", expected['name'],
                    expected['format'], img_info['format'])
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from qemu-img info "
                    "checked successfully against the created "
                    "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error(
                    "Volume format mismatch for volume: %s\n"
                    "Expected format: %s\n"
                    "Format from vol-dumpxml: %s", expected['name'],
                    expected['format'], volume_xml.format)
                error_count += 1
            else:
                logging.debug(
                    "Format of volume: %s from virsh vol-dumpxml "
                    "checked successfully against the created"
                    " volume format", expected['name'])

        logging.info("Expected encryption format: %s",
                     expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # The 'default' encryption format resolves to a specific value
            # (qcow), so just log it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error(
                        "Encryption secret mismatch for volume: %s\n"
                        "Expected secret uuid: %s\n"
                        "Secret uuid from vol-dumpxml: %s", expected['name'],
                        expected['encrypt_secret'], secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, one is generated
                    # automatically at volume creation time
                    logging.debug("Volume encryption secret is %s",
                                  secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error(
                "Pool name mismatch for volume: %s against "
                "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug(
                "Pool name of volume: %s checked successfully"
                " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-list\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['list'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['info'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against virsh"
                " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['xml'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " virsh vol-dumpxml for volume: %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error(
                "Capacity mismatch for volume: %s against "
                "qemu-img info\nExpected: %s\nActual: %s", expected['name'],
                expected['capacity'], norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug(
                "Capacity value checked successfully against"
                " qemu-img info for volume: %s", expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return all exist libvirt secrets uuid in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        test.error("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        test.error("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be held by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get existing libvirt secrets before the test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug(
        "Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
        "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
        "vol_format:%s", pool_name, pool_type, pool_target, vol_name,
        vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Creates volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(
                        **encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (
                    ip_addr, source_name, volume_name)
                process.run("qemu-img create -f %s %s %s" %
                            (vol_format, expected_vol['path'], capacity),
                            shell=True)
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            test.fail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name,
                                  pool_type,
                                  pool_target,
                                  emulated_image,
                                  source_name=source_name)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
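
The unit handling in norm_capacity above boils down to multiplying the numeric part by the right power of 1024. A standalone sketch of the same idea (names and input formats are illustrative):

import re

UNITS = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}

def to_bytes(text):
    # "1.00 GiB" -> 1073741824; assumes "<value> <unit>" formatting
    value, unit = re.search(r'(\S+)\s(\S+)', text).groups()
    return int(float(value) * UNITS[unit[0].upper()])

assert to_bytes("1.00 GiB") == 1073741824
assert to_bytes("512.00 bytes") == 512
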
Example #29
0
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file.

    Prerequisite:
    The host needs to have a WWPN and WWNN of a vHBA which is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online HBA cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when preparing pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
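
The mount_and_dd helper used above is also outside this excerpt. A minimal sketch of what it presumably does, assuming it mounts the attached disk inside the guest session and verifies a small dd write (hypothetical, not the suite's actual helper):

def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    # Hypothetical helper: mount the attached disk in the guest and
    # confirm dd can write to it; the caller unmounts afterwards.
    session.cmd_status_output("mkdir -p %s" % mount_point)
    status, _ = session.cmd_status_output(
        "mount %s %s" % (mount_disk, mount_point))
    if status:
        return False
    status, _ = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=1M count=10" % mount_point)
    return status == 0
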
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: From the dict to get encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = (
            "qemu-img create -f luks "
            "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
            "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        Fails the test if the device is not in the given format.
        """
        cmd_result = process.run("qemu-img" + ' -h',
                                 ignore_status=True,
                                 shell=True,
                                 verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; set a wrong luks_secret_passwd for negative tests
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source)
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {
                        "sec_usage": "iscsi",
                        "sec_target": auth_sec_usage
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid,
                                           chap_passwd,
                                           encode=True,
                                           debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        chap_user=chap_user,
                        chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_uuid": auth_sec_uuid
                        }
                    elif use_auth_usage:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_usage": auth_sec_usage_target
                        }
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank value so cleanup can tell whether the config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid
                    }
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                        ceph_mon_ip)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid,
                                     luks_secret_passwd,
                                     encode=True,
                                     debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {
            "encryption": "luks",
            "secret": {
                "type": "passphrase",
                "uuid": luks_sec_uuid
            }
        }
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # With a wrong password in the disk xml for cold plug cases,
            # the VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
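
For reference, the encrypt-then-verify flow implemented by encrypt_dev and check_dev_format above can be exercised standalone. A minimal sketch against a plain file; the path, size and password are illustrative:

from avocado.utils import process

device = "/tmp/luks-demo.img"
password = "password"
# Create a luks-formatted image, passing the passphrase via a qemu-img
# secret object (the same command shape encrypt_dev uses).
process.run(
    "qemu-img create -f luks "
    "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
    "-o key-secret=sec0 %s 100M" % (password, device), shell=True)
# Verify the format; '-U' skips image locking on newer qemu-img builds.
result = process.run("qemu-img info -U %s" % device,
                     ignore_status=True, shell=True)
assert b"luks" in result.stdout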