Example #1
def run(test, params, env):
    """
    Test svirt in virt-clone.
    """
    VIRT_CLONE = None
    try:
        VIRT_CLONE = utils_misc.find_command("virt-clone")
    except ValueError:
        raise error.TestNAError("No virt-clone command found.")

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux")
    sec_label = params.get("svirt_virt_clone_vm_sec_label", None)
    sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_virt_clone_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    clone_name = ("%s-clone" % vm.name)
    try:
        cmd = ("%s --original %s --name %s --auto-clone" %
               (VIRT_CLONE, vm.name, clone_name))
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to execute virt-clone command."
                                 "Detail: %s." % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if not virsh.domstate(clone_name).exit_status:
            libvirt_vm.VM(clone_name, params, None, None).remove_with_storage()
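Every example on this page uses the same save-and-restore discipline around
utils_selinux.set_status(): read the current status first, switch it, and put
the backup back in a finally block. A minimal standalone sketch of that idiom
(assuming the avocado-vt import path for the utils_selinux module used above):

from virttest import utils_selinux  # assumed import path (avocado-vt)

def run_with_selinux_status(status, func, *args, **kwargs):
    """Run func with the host SELinux status temporarily set to status."""
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(status)
    try:
        return func(*args, **kwargs)
    finally:
        # Restore the original status even if func raises.
        utils_selinux.set_status(backup_sestatus)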
Example #2
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated_iscsi", image_size="1G"):
    """
    Set up (and log in to the iscsi target) or clean up the iscsi service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :return: iscsi device name or iscsi target
    """
    try:
        utils_misc.find_command("tgtadm")
        utils_misc.find_command("iscsiadm")
    except ValueError:
        raise error.TestNAError("Missing command 'tgtadm' and/or 'iscsiadm'.")

    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    emulated_path = os.path.join(tmpdir, emulated_image)
    emulated_target = "iqn.2001-01.com.virttest:%s.target" % emulated_image
    iscsi_params = {"emulated_image": emulated_path, "target": emulated_target,
                    "image_size": image_size, "iscsi_thread_id": "virt"}
    _iscsi = iscsi.Iscsi(iscsi_params)
    if is_setup:
        sv_status = None
        if utils_misc.selinux_enforcing():
            sv_status = utils_selinux.get_status()
            utils_selinux.set_status("permissive")
        _iscsi.export_target()
        if sv_status is not None:
            utils_selinux.set_status(sv_status)
        if is_login:
            _iscsi.login()
            # The device doesn't necessarily appear instantaneously, so give
            # about 5 seconds for it to appear before giving up
            iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
                                               "Searching iscsi device name.")
            if iscsi_device:
                logging.debug("iscsi device: %s", iscsi_device)
                return iscsi_device
            logging.error("No iscsi device found.")
            # Cleanup and return "" - caller needs to handle that
            # _iscsi.export_target() will have set the emulated_id and
            # export_flag already on success...
            _iscsi.cleanup()
            utils.run("rm -f %s" % emulated_path)
        else:
            return emulated_target
    else:
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        utils.run("rm -f %s" % emulated_path)
    return ""
Example #4
    def __init__(self, test, params):
        self.vm_name = params.get("vm_name", "test-vm1")
        self.test = test
        while virsh.domain_exists(self.vm_name):
            self.vm_name += ".test"
        params["main_vm"] = self.vm_name
        iso_file = os.path.join(data_dir.get_data_dir(),
                                params.get('cdrom_cd1'))
        if not os.path.exists(iso_file):
            self.test.cancel("Please prepare the iso file: %s" % iso_file)
        self.env = params.get('env')
        self.vm = self.env.create_vm("libvirt", None, self.vm_name, params,
                                     test.bindir)
        self.env.register_vm(self.vm_name, self.vm)
        self.twice_execute = "yes" == params.get("twice_execute", "no")
        self.kill_first = "yes" == params.get("kill_first", "no")
        self.read_only = "yes" == params.get("read_only", "no")
        self.selinux_enforcing = utils_selinux.is_enforcing()
        if self.selinux_enforcing:
            utils_selinux.set_status("permissive")
        self.image_path = os.path.join(test.virtdir, "test_image")
        if not os.path.exists(self.image_path):
            os.mkdir(self.image_path)
        if self.read_only:
            os.chmod(self.image_path,
                     stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
        params["image_name"] = os.path.join(self.image_path, self.vm_name)
        params["image_format"] = "raw"
        params['force_create_image'] = "yes"
        params['remove_image'] = "yes"
        params['shutdown_cleanly'] = "yes"
        params['shutdown_cleanly_timeout'] = 120
        params['guest_port_unattended_install'] = 12323
        params['inactivity_watcher'] = "error"
        params['inactivity_treshold'] = 1800
        params['image_verify_bootable'] = "no"
        params['unattended_delivery_method'] = "cdrom"
        params['drive_index_unattended'] = 1
        params['drive_index_cd1'] = 2
        params['boot_once'] = "d"
        params['medium'] = "cdrom"
        params['wait_no_ack'] = "yes"
        params['image_raw_device'] = "yes"
        params['backup_image_before_testing'] = "no"
        params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                   "console=ttyS0,115200 console=tty0")
        params['cdroms'] += " unattended"
        params['redirs'] += " unattended_install"

        self.params = params
Example #5
    def __init__(self, test, params):
        self.vm_name = params.get("vm_name", "test-vm1")
        while virsh.domain_exists(self.vm_name):
            self.vm_name += ".test"
        params["main_vm"] = self.vm_name
        iso_file = os.path.join(data_dir.get_data_dir(),
                                params.get('cdrom_cd1'))
        if not os.path.exists(iso_file):
            raise error.TestNAError("Please prepare the iso file: %s"
                                    % iso_file)
        self.env = params.get('env')
        self.vm = self.env.create_vm("libvirt", None, self.vm_name, params,
                                     test.bindir)
        self.env.register_vm(self.vm_name, self.vm)
        self.twice_execute = "yes" == params.get("twice_execute", "no")
        self.kill_first = "yes" == params.get("kill_first", "no")
        self.read_only = "yes" == params.get("read_only", "no")
        self.selinux_enforcing = utils_selinux.is_enforcing()
        if self.selinux_enforcing:
            utils_selinux.set_status("permissive")
        self.image_path = os.path.join(test.virtdir, "test_image")
        if not os.path.exists(self.image_path):
            os.mkdir(self.image_path)
        if self.read_only:
            os.chmod(self.image_path,
                     stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
        params["image_name"] = os.path.join(self.image_path, self.vm_name)
        params["image_format"] = "raw"
        params['force_create_image'] = "yes"
        params['remove_image'] = "yes"
        params['shutdown_cleanly'] = "yes"
        params['shutdown_cleanly_timeout'] = 120
        params['guest_port_unattended_install'] = 12323
        params['inactivity_watcher'] = "error"
        params['inactivity_treshold'] = 1800
        params['image_verify_bootable'] = "no"
        params['unattended_delivery_method'] = "cdrom"
        params['drive_index_unattended'] = 1
        params['drive_index_cd1'] = 2
        params['boot_once'] = "d"
        params['medium'] = "cdrom"
        params['wait_no_ack'] = "yes"
        params['image_raw_device'] = "yes"
        params['backup_image_before_testing'] = "no"
        params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                   "console=ttyS0,115200 console=tty0")
        params['cdroms'] += " unattended"
        params['redirs'] += " unattended_install"

        self.params = params
        self.test = test
Example #6
 def recover(self, params=None):
     """
     Recover test environment
     """
     if self.selinux_enforcing:
         utils_selinux.set_status("enforcing")
     if virsh.domain_exists(self.vm_name):
         virsh.remove_domain(self.vm_name)
     image_file = params.get("image_name")
     if os.path.exists(image_file):
         os.remove(image_file)
     if os.path.isdir(self.image_path):
         os.rmdir(self.image_path)
     self.env.unregister_vm(self.vm_name)
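This recover() is the counterpart of the __init__ shown in Examples #4 and
#5. How the two are paired is a sketch under assumed names (TestEnvironment
and the test body are hypothetical, not from this page):

env_helper = TestEnvironment(test, params)  # hypothetical class owning the
                                            # __init__/recover pair above
try:
    pass  # actual test body goes here
finally:
    # Always undo the permissive switch and remove the domain and images.
    env_helper.recover(params)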
Example #8
 def restore(self, name):
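      # 'name' is actually a saved-state dict: {'name': <service>,
      # 'status': <status>}.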
     info = name
     if info['name'] == 'libvirtd':
         if info['status'] == 'running':
             if not self.libvirtd.start():
                 raise Exception('Failed to start libvirtd')
         elif info['status'] == 'stopped':
             if not self.libvirtd.stop():
                 raise Exception('Failed to stop libvirtd')
         else:
             raise Exception('Unknown libvirtd status %s' % info['status'])
     elif info['name'] == 'selinux':
         utils_selinux.set_status(info['status'])
     else:
         raise Exception('Unknown service %s' % info['name'])
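A hedged illustration of how restore() expects to be driven; the dict keys
match what the method reads, but the instance name and the saved states are
hypothetical:

saved_states = [{'name': 'libvirtd', 'status': 'running'},
                {'name': 'selinux', 'status': 'enforcing'}]
for state in saved_states:
    recoverer.restore(state)  # 'recoverer' is a hypothetical instance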
Example #9
def state_test():
    states = [ServiceState(), FileState(), DirState(), DomainState(),
              NetworkState(), PoolState(), SecretState(), MountState()]
    for state in states:
        state.backup()
    utils.run('echo hello > /etc/exports')
    virsh.start('virt-tests-vm1')
    virsh.net_autostart('default', '--disable')
    virsh.pool_destroy('mount')
    utils.run('rm /var/lib/virt_test/images/hello')
    utils.run('mkdir /var/lib/virt_test/images/hi')
    utils_libvirtd.Libvirtd().stop()
    utils_selinux.set_status('permissive')
    for state in states:
        lines = state.check(recover=True)
        for line in lines:
            print(line)
Example #10
 def __init__(self, test, params):
     self.td = None
     self.cpu_num = int(params.get("cpu_num", "1"))
     self.vm_name = params.get("main_vm")
     self.vm_new_name = params.get("vm_new_name")
     self.cgroup_name = params.get("cgroup_name")
     self.cgroup_dir = params.get("cgroup_dir")
     self.new_image_file = params.get("new_image_file")
     if self.new_image_file:
         self.new_image_file = os.path.join(test.virtdir,
                                            self.new_image_file)
     self.time_out = int(params.get("time_out", "600"))
     self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
     self.twice_execute = "yes" == params.get("twice_execute", "no")
     self.kill_first = "yes" == params.get("kill_first", "no")
     if params.get("abnormal_type") in ["disk_lack", ""]:
         self.selinux_enforcing = utils_selinux.is_enforcing()
         if self.selinux_enforcing:
             utils_selinux.set_status("permissive")
         self.fs_type = params.get("fs_type", "ext4")
         xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
         disk_node = xml_file.get_disk_all()['vda']
         source_file = disk_node.find('source').get('file')
         self.image_size = utils_misc.get_image_info(source_file)['dsize']
         # Set the size to be image_size
         iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
         params['image_size'] = iscsi_size
         self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
                                                "iscsi")
         try:
             device_source = self.iscsi_dev.setup()
         except (exceptions.TestError, ValueError) as detail:
             self.iscsi_dev.cleanup()
             self.test.cancel("Cannot get iscsi device on this"
                              " host:%s\n" % detail)
         libvirt.mk_label(device_source)
         libvirt.mk_part(device_source, iscsi_size)
         self.mount_dir = os.path.join(test.virtdir,
                                       params.get('mount_dir'))
         if not os.path.exists(self.mount_dir):
             os.mkdir(self.mount_dir)
         params['mount_dir'] = self.mount_dir
         self.partition = device_source + "1"
         libvirt.mkfs(self.partition, self.fs_type)
         utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
         self.new_image_file = os.path.join(self.mount_dir, "new_file")
Example #12
 def recover(self, params):
     """
     Recover test environment
     """
     abnormal_type = params.get("abnormal_type")
      cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     if virsh.domain_exists(self.vm_new_name):
         virsh.remove_domain(self.vm_new_name)
     if os.path.exists(self.new_image_file):
         os.remove(self.new_image_file)
     if self.twice_execute:
         if virsh.domain_exists(self.vm_new_name1):
             virsh.remove_domain(self.vm_new_name1)
         if os.path.exists(self.new_image_file1):
             os.remove(self.new_image_file1)
     if abnormal_type == "memory_lack":
          if 'memory_pid' in params:
              pid = params.get('memory_pid')
              if isinstance(pid, str):
                  pid = int(pid)
             utils_misc.safe_kill(pid, signal.SIGKILL)
             utils.run("swapon -a")
         tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
         tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
         if os.path.exists(tmp_c_file):
             os.remove(tmp_c_file)
         if os.path.exists(tmp_exe_file):
             os.remove(tmp_exe_file)
     elif abnormal_type in ["disk_lack", ""]:
         if self.selinux_enforcing:
             utils_selinux.set_status("enforcing")
         tmp_file = os.path.join(self.mount_dir, "tmp")
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
         # Sometimes one umount action is not enough
         utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
                                                       self.mount_dir,
                                                       self.fs_type), 120)
         if self.iscsi_dev:
             self.iscsi_dev.cleanup()
         os.rmdir(self.mount_dir)
     elif abnormal_type == "cpu_lack":
         os.system("cat /sys/fs/cgroup/cpuset/cpuset.cpus > /sys/fs/cgroup/cpuset/machine.slice/cpuset.cpus")
     remove_machine_cgroup()
Example #13
 def recover(self, params):
     """
     Recover test environment
     """
     abnormal_type = params.get("abnormal_type")
     if self.cpu_status:
         cpu.offline(self.cpu_num)
     else:
         cpu.online(self.cpu_num)
     if virsh.domain_exists(self.vm_new_name):
         virsh.remove_domain(self.vm_new_name)
     if os.path.exists(self.new_image_file):
         os.remove(self.new_image_file)
     if self.twice_execute:
         if virsh.domain_exists(self.vm_new_name1):
             virsh.remove_domain(self.vm_new_name1)
         if os.path.exists(self.new_image_file1):
             os.remove(self.new_image_file1)
     if abnormal_type == "memory_lack":
         if 'memory_pid' in params:
             pid = params.get('memory_pid')
             utils_misc.safe_kill(pid, signal.SIGKILL)
             process.run("swapon -a", shell=True)
         tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
         tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
         if os.path.exists(tmp_c_file):
             os.remove(tmp_c_file)
         if os.path.exists(tmp_exe_file):
             os.remove(tmp_exe_file)
     elif abnormal_type in ["disk_lack", ""]:
         if self.selinux_enforcing:
             utils_selinux.set_status("enforcing")
         tmp_file = os.path.join(self.mount_dir, "tmp")
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
         # Sometimes one umount action is not enough
          utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
                                                        self.mount_dir,
                                                        self.fs_type), 120)
         if self.iscsi_dev:
             self.iscsi_dev.cleanup()
         os.rmdir(self.mount_dir)
     remove_machine_cgroup()
Example #14
def run(test, params, env):
    """
    Test svirt in virt-install.

    (1). Init variables.
    (2). Set selinux on host.
    (3). Set label of image.
    (4). Run unattended install.
    (5). Clean up.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")

    # Set selinux status on host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    # Set the image label.
    disk_label = params.get("disk_label", None)
    vm_name = params.get("main_vm", None)
    vm_params = params.object_params(vm_name)
    base_dir = params.get("images_base_dir", data_dir.get_data_dir())
    image_filename = storage.get_image_filename(vm_params, base_dir)
    utils_selinux.set_context_of_file(image_filename, disk_label)

    try:
        try:
            unattended_install.run(test, params, env)
            # Install completed.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except error.CmdError as e:
            # Install failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "Error: %s" % e)
    finally:
        # cleanup
        utils_selinux.set_status(backup_sestatus)
        if virsh.domain_exists(vm_name):
            virsh.remove_domain(vm_name)
Example #15
def setup_or_cleanup_nfs(is_setup,
                         mount_dir="",
                         is_mount=False,
                         export_options="rw,no_root_squash",
                         mount_src="nfs-export"):
    """
    Set up or clean up nfs service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount point
    :param is_mount: Boolean value, true for mount, false for umount
    :param export_options: options for the nfs export
    :param mount_src: source directory to export
    :return: export nfs path or nothing
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    if not os.path.isabs(mount_src):
        mount_src = os.path.join(tmpdir, mount_src)
    if not mount_dir:
        mount_dir = os.path.join(tmpdir, 'nfs-mount')

    nfs_params = {
        "nfs_mount_dir": mount_dir,
        "nfs_mount_options": "rw",
        "nfs_mount_src": mount_src,
        "setup_local_nfs": "yes",
        "export_options": "rw,no_root_squash"
    }
    _nfs = nfs.Nfs(nfs_params)
    # Set selinux to permissive so that files on the nfs export
    # can be used freely
    if utils_misc.selinux_enforcing():
        sv_status = utils_selinux.get_status()
        utils_selinux.set_status("permissive")
    if is_setup:
        _nfs.setup()
        if not is_mount:
            _nfs.umount()
        return mount_src
    else:
        _nfs.unexportfs_in_clean = True
        _nfs.cleanup()
        return ""
Example #16
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # Get test parameters.

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        raise error.TestFail("Test failed: %s" % str(e))
Example #18
 def recover(self, params):
     """
     Recover test environment
     """
     abnormal_type = params.get("abnormal_type")
      cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     if virsh.domain_exists(self.vm_new_name):
         virsh.remove_domain(self.vm_new_name)
     if os.path.exists(self.new_image_file):
         os.remove(self.new_image_file)
     if self.twice_execute:
         if virsh.domain_exists(self.vm_new_name1):
             virsh.remove_domain(self.vm_new_name1)
         if os.path.exists(self.new_image_file1):
             os.remove(self.new_image_file1)
     if abnormal_type == "memory_lack":
         if 'memory_pid' in params:
             pid = params.get('memory_pid')
             utils_misc.safe_kill(pid, signal.SIGKILL)
             process.run("swapon -a", shell=True)
         tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
         tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
         if os.path.exists(tmp_c_file):
             os.remove(tmp_c_file)
         if os.path.exists(tmp_exe_file):
             os.remove(tmp_exe_file)
     elif abnormal_type in ["disk_lack", ""]:
         if self.selinux_enforcing:
             utils_selinux.set_status("enforcing")
         tmp_file = os.path.join(self.mount_dir, "tmp")
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
         # Sometimes one umount action is not enough
         utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
                                                       self.mount_dir,
                                                       self.fs_type), 120)
         if self.iscsi_dev:
             self.iscsi_dev.cleanup()
         os.rmdir(self.mount_dir)
     remove_machine_cgroup()
Example #20
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create a image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    # Do the attach action.
    try:
        virsh.attach_disk(vm_name, source=img_path, target="vdf",
                          extra="--persistent", ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Attach disk %s to vdf on VM %s failed."
                             % (img_path, vm.name))

    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "Error: %s" % e)
    finally:
        # clean up
        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Example #21
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    qemu_no_usr_grp = "yes" == params.get("qemu_no_usr_grp", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in list(disks.values()):
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write mode to disk by chmod g+w
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        process.run(cmd, ignore_status=False, shell=True)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake", test):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake", test):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(
                qemu_user, qemu_group)

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        test.fail("Label of VM processs is not "
                                  "expected after starting.\nDetail:"
                                  "vm_context=%s, sec_label_trans=%s" %
                                  (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                test.fail("Label of disk is not expected "
                                          "after VM starting.\nDetail: "
                                          "disk_context=%s, "
                                          "sec_label_trans=%s." %
                                          (disk_context, sec_label_trans))
            elif (set_qemu_conf and not security_default_confined
                  and not qemu_no_usr_grp):
                if vm_context != qemu_conf_label_trans:
                    test.fail("Label of VM processs is not expected"
                              " after starting.\nDetail: vm_context="
                              "%s, qemu_conf_label_trans=%s" %
                              (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        test.fail("Label of disk is not expected " +
                                  "after VM starting.\nDetail: di" +
                                  "sk_context=%s, " % disk_context +
                                  "qemu_conf_label_trans=%s." %
                                  qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                if libvirt_version.version_compare(1, 3, 5):
                    chk_str = "-name guest=%s,process=qemu:%s" % (vm_name,
                                                                  vm_name)
                else:
                    chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = process.run(cmd, shell=True)
                if chk_str in result.stdout:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout))
                else:
                    test.fail("%s not in vm process command: %s" %
                              (chk_str, result.stdout))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # As dynamic_ownership is 1 on a non-shared fs, the current
                # domain image is restored to 0:0 when sec_relabel is enabled.
                if dynamic_ownership:
                    if img_label_after != "0:0":
                        test.fail("Label of disk is %s; it did not "
                                  "restore to 0:0 when the VM shut "
                                  "down." % img_label_after)
            elif set_qemu_conf and not set_sec_label:
                # As dynamic_ownership is 1 on a non-shared fs, the current
                # domain image is restored to 0:0 when only qemu.conf is set.
                if dynamic_ownership:
                    if img_label_after != "0:0":
                        test.fail("Label of disk is %s; it did not "
                                  "restore to 0:0 when the VM shut "
                                  "down." % img_label_after)
                else:
                    if img_label_after != img_label:
                        test.fail("Bug: Label of disk is changed\n"
                                  "Detail: img_label_after=%s, "
                                  "img_label=%s.\n" %
                                  (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start failed as expected, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf and not qemu_no_usr_grp:
                                if qemu_conf_label_trans == "107:107":
                                    logging.debug(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                logging.debug(err_msg)
                else:
                    test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = process.run(cmd, ignore_status=True, shell=True)
        utils_selinux.set_status(backup_sestatus)
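format_user_group_str() is called above but not defined on this page. A
minimal sketch, assuming it resolves user/group names (or passes numeric
strings through) to the "uid:gid" form that the test compares against
os.stat() results:

import grp
import pwd

def format_user_group_str(user, group):
    # Hypothetical reconstruction: map a name or numeric string to its id.
    uid = user if user.isdigit() else pwd.getpwnam(user).pw_uid
    gid = group if group.isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)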
Example #22
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Configure qemu.conf if needed.
    (3).Label the VM and disk with proper labels.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
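        # Split "user:role:type[:range]" and return (type, range or None);
        # an MLS/MCS range may itself contain a colon (e.g. "s0:c10,c20"),
        # hence the length checks below.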
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
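        # Compare the contexts field by field over label1's fields, so a
        # trailing MCS range present only in label2 is ignored.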
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in disks.values():
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            o_r = utils_selinux.verify_defcon(pathname=disk_path,
                                              readonly=False,
                                              forcedesc=True)
            orig_label_type = backup_labels_of_disks[disk_path].split(":")[2]
            if o_r and (orig_label_type != img_label_type):
                raise error.TestFail("change disk label(%s) failed" %
                                     img_label_type)
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s" %
                                     (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s." %
                                     (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # The disk context is 'system_u:object_r:svirt_image_t:s0'.
                # When the VM starts, the MLS/MCS range is added
                # automatically, so imagelabel becomes e.g.
                # 'system_u:object_r:svirt_image_t:s0:cXX,cXXX';
                # the MCS range should not be part of the check.
                if not _check_label_equal(disk_context, imagelabel):
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetal: disk_context="
                                         "%s, imagelabel=%s" %
                                         (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if img_label_after != img_label:
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546

                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
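            # (Assuming these helpers wrap the usual SELinux tooling, the
            # pair is roughly equivalent to:
            #   semanage fcontext -a -t <type> -r <range> '<dir>(/.*)?'
            #   restorecon -v <path>
            # i.e. register the context persistently, then re-apply it.)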
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            return int(output.strip()) == attach_count
        except ShellTimeoutError as detail:
            test.fail("Hotplug check timed out: %s" % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes an error occurs even though $? is 0.
                test.fail("\nAttached device successfully in negative case."
                          "\nExpected failure once the attach count exceeds "
                          "the maximum.\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Exemple #24
0
        if len(graphic_video) > 1:
            video_type = graphic_video[1]
            logging.info('Set video type to %s', video_type)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            video = vmxml.xmltreefile.find('devices').find('video').find('model')
            video.set('type', video_type)
            vmxml.sync()
if checkpoint.startswith('listen'):
    listen_type = checkpoint.split('_')[-1]
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    listen = vmxml.xmltreefile.find('devices').find('graphics').find('listen')
    listen.set('type', listen_type)
    vmxml.sync()
if checkpoint == 'host_selinux_on':
    params['selinux_stat'] = utils_selinux.get_status()
    utils_selinux.set_status('enforcing')
if checkpoint.startswith('selinux'):
    set_selinux(checkpoint[8:])
if checkpoint.startswith('host_firewalld'):
    service_mgr = service.ServiceManager()
    logging.info('Backing up firewall services status')
    params['bk_firewalld_status'] = service_mgr.status('firewalld')
    if 'start' in checkpoint:
        service_mgr.start('firewalld')
    if 'stop' in checkpoint:
        service_mgr.stop('firewalld')
if checkpoint == 'guest_firewalld_status':
    get_firewalld_status()
if checkpoint == 'remove_securetty':
    logging.info('Remove /etc/securetty file from guest')
    cmd = ['rm -f /etc/securetty']
Exemple #25
0
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') == 'network':
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                raise exceptions.TestError('Not enough network interface')
            logging.debug('MAC address: %s' % params['mac_address'])
        elif checkpoint.startswith('spice'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'spice'})
            if checkpoint == 'spice_encrypt':
                spice_passwd = {'passwd': params.get('spice_passwd', 'redhat')}
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {'passwdValidTo': '1970-01-01T00:00:01'}
        elif checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        elif checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])

        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        check_result(v2v_result, status_error)
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if backup_xml:
            backup_xml.sync()
        if checkpoint == 'unclean_fs':
            cleanup_fs()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
Exemple #26
0
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None

        # create image disk
        if not os.path.isfile(self.emulated_image):
            process.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) // 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove; rebuilding is fine
                process.system(self.create_cmd)

        # confirm if the target exists and create iSCSI target
        cmd = "targetcli ls /iscsi 1"
        output = decode_to_text(process.system_output(cmd))
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")

            # Set selinux to permissive mode to make sure
            # iscsi target export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")

            # In fact, we've got two options here:
            #
            # 1) Create a block backstore, which usually provides the best
            #    performance. We can use a block device like /dev/sdb or
            #    a previously created logical volume
            #    (lvcreate --name lv_iscsi --size 1G vg).
            # 2) Create a fileio backstore,
            #    which enables the local file system cache.
            #
            # This class only works with emulated iscsi devices,
            # so a fileio backstore is sufficient and safe.
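            # (For reference, option 1 would look roughly like this, with a
            # hypothetical device path:
            #   targetcli /backstores/block/ create name=block_backend dev=/dev/sdb
            # )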

            # Create a fileio backstore
            device_cmd = ("targetcli /backstores/fileio/ create %s %s" %
                          (self.device, self.emulated_image))
            output = decode_to_text(process.system_output(device_cmd))
            if "Created fileio" not in output:
                raise exceptions.TestFail("Failed to create fileio %s. (%s)" %
                                          (self.device, output))

            # Create an IQN with a target named target_name
            target_cmd = "targetcli /iscsi/ create %s" % self.target
            output = decode_to_text(process.system_output(target_cmd))
            if "Created target" not in output:
                raise exceptions.TestFail("Failed to create target %s. (%s)" %
                                          (self.target, output))

            check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target
            portal_info = decode_to_text(process.system_output(check_portal))
            if "0.0.0.0:3260" not in portal_info:
                # Create portal
                # 0.0.0.0 means binding to INADDR_ANY
                # and using default IP port 3260
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" %
                              (self.target, "0.0.0.0"))
                output = decode_to_text(process.system_output(portal_cmd))
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            if ("ipv6" == utils_net.IPAddress(self.portal_ip).version
                    and self.portal_ip not in portal_info):
                # Ipv6 portal address can't be created by default,
                # create ipv6 portal if needed.
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" %
                              (self.target, self.portal_ip))
                output = decode_to_text(process.system_output(portal_cmd))
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            # Create lun
            lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target
            dev_cmd = "create /backstores/fileio/%s" % self.device
            output = decode_to_text(process.system_output(lun_cmd + dev_cmd))
            luns = re.findall(r"Created LUN (\d+).", output)
            if not luns:
                raise exceptions.TestFail("Failed to create lun. (%s)" %
                                          output)
            self.luns = luns[0]

            # Set firewall if it's enabled
            output = decode_to_text(
                process.system_output("firewall-cmd --state",
                                      ignore_status=True))
            if re.findall("^running", output, re.M):
                # firewall is running
                process.system("firewall-cmd --permanent --add-port=3260/tcp")
                process.system("firewall-cmd --reload")

            # Restore selinux
            if selinux_mode is not None:
                utils_selinux.set_status(selinux_mode)

            self.export_flag = True
        else:
            logging.info("Target %s has already existed!" % self.target)

        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
        else:
            # To enable the so-called "demo mode" TPG operation,
            # disable all authentication for the corresponding endpoint,
            # which grants access to all initiators so that they can
            # access all LUNs in the TPG without further authentication.
            auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
            attr_cmd = ("set attribute %s %s %s %s" %
                        ("authentication=0", "demo_mode_write_protect=0",
                         "generate_node_acls=1", "cache_dynamic_acls=1"))
            output = decode_to_text(process.system_output(auth_cmd + attr_cmd))
            logging.info("Define access rights: %s" % output)
            # Discovery the target
            self.portal_visible()

        # Save configuration
        process.system("targetcli / saveconfig")

        # Restart iSCSI service
        restart_iscsid()
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {
        'model': sec_model,
        'label': img_label,
        'relabel': sec_relabel
    }
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = 'root'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support volume creation in
                # libvirt; a logical pool can create volumes via libvirt,
                # but the volume format is not supported and defaults
                # to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # set host_sestatus as nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{
                "attrs": {
                    source_type: img_path
                },
                "seclabels": source_seclabel
            })
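            # The resulting <source> element carries a per-device seclabel,
            # roughly (values illustrative):
            #   <source file='/path/to/img'>
            #     <seclabel model='selinux' relabel='no'>
            #       <label>system_u:object_r:svirt_image_t:s0</label>
            #     </seclabel>
            #   </source>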
        else:
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {
                    source_type: img_path
                }})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise-AND with the rawio capability mask to check that
                # cap_sys_rawio is set. CAP_SYS_RAWIO is capability number
                # 17, so its mask is 1 << 17 == 0x20000.
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1,
                                                   0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write mode to disk (chmod g+w)
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        utils.run(cmd, ignore_status=False)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake"):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake"):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(qemu_user, qemu_group)
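            # (format_user_group_str is defined elsewhere; a plausible sketch
            # that resolves names via pwd/grp:
            #   import pwd, grp
            #   def format_user_group_str(user, group):
            #       uid = user if user.isdigit() else pwd.getpwnam(user).pw_uid
            #       gid = group if group.isdigit() else grp.getgrnam(group).gr_gid
            #       return "%s:%s" % (uid, gid)
            # turning e.g. "qemu:qemu" into "107:107".)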

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        raise error.TestFail("Label of VM processs is not "
                                             "expected after starting.\nDetail:"
                                             "vm_context=%s, sec_label_trans=%s"
                                             % (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                raise error.TestFail("Label of disk is not " +
                                                     "expected" +
                                                     " after VM starting.\n" +
                                                     "Detail: disk_context" +
                                                     "=%s" % disk_context +
                                                     ", sec_label_trans=%s."
                                                     % sec_label_trans)
            elif set_qemu_conf and not security_default_confined:
                if vm_context != qemu_conf_label_trans:
                    raise error.TestFail("Label of VM processs is not expected"
                                         " after starting.\nDetail: vm_context="
                                         "%s, qemu_conf_label_trans=%s"
                                         % (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        raise error.TestFail("Label of disk is not expected " +
                                             "after VM starting.\nDetail: di" +
                                             "sk_context=%s, " % disk_context +
                                             "qemu_conf_label_trans=%s." %
                                             qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = utils.run(cmd)
                if chk_str in result.stdout:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout))
                else:
                    raise error.TestFail("%s not in vm process command: %s" %
                                         (chk_str, result.stdout))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # With dynamic_ownership set to 1 on a non-shared fs, the
                # domain image is restored to 0:0 when sec_relabel is enabled.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is img_label_after"
                                             ":%s" % img_label_after + ", it "
                                             "was not restored to 0:0 at VM "
                                             "shutdown.")
            elif set_qemu_conf and not set_sec_label:
                # With dynamic_ownership set to 1 on a non-shared fs, the
                # domain image is restored to 0:0 when only qemu.conf is set.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is img_label_after"
                                             ":%s" % img_label_after + ", it "
                                             "was not restored to 0:0 at VM "
                                             "shutdown.")
                else:
                    if (not img_label_after == img_label):
                        raise error.TestFail("Bug: Label of disk is changed\n"
                                             "Detail: img_label_after=%s, "
                                             "img_label=%s.\n"
                                             % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start fail not due to  DAC setting, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf:
                                if qemu_conf_label_trans == "107:107":
                                    raise error.TestNAError(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                raise error.TestNAError(err_msg)
                else:
                    raise error.TestFail("Test failed in positive case."
                                         "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
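                # XOR toggles S_IWGRP; safe here because qemu_disk_mod is
                # only True when the test itself added the bit above.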
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = utils.run(cmd, ignore_status=True)
        utils_selinux.set_status(backup_sestatus)
    def _migrate(self):
        """
        1.Set selinux state
        2.Record vm uptime
        3.For postcopy migration:
            1) Set migration speed to low value
            2) Monitor postcopy event
        4.Do live migration
        5.Check migration result: succeed or fail with expected error
        6.For postcopy migration: check postcopy event
        7.Do post migration check: check vm state, uptime, network
        """
        # Set selinux state before migration
        # NOTE: if selinux state is set too early, it may be changed
        # in other methods unexpectedly, so set it just before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(self.selinux_state)
        # TODO: Set selinux on migrate_dest_host

        # Check vm uptime before migration
        logging.debug("Check vm uptime before migration")
        self.uptime = {}
        for vm in self.vms:
            self.uptime[vm.name] = vm.uptime(connect_uri=vm.connect_uri)

        # Do postcopy/precopy related operations/setting
        if self.migrate_flags & VIR_MIGRATE_POSTCOPY:
            # Set migration speed to low value in case it finished too early
            # before postcopy mode starts
            for vm in self.vms:
                virsh.migrate_setspeed(vm.name, 1, uri=vm.connect_uri)

            # Monitor event "Suspended Post-copy" for postcopy migration
            logging.debug("Monitor the event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            self.objs_list.append(virsh_session)
            cmd = "event %s --loop --all --timestamp" % self.main_vm.name
            virsh_session.sendline(cmd)

            # Set func to be executed during postcopy migration
            func = virsh.migrate_postcopy
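            # (Presumably do_migration invokes func while the migration
            # thread runs, i.e. it issues "virsh migrate-postcopy" to
            # switch the ongoing job into postcopy mode.)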
        else:
            # Set func to be executed during precopy migration
            func = None

        # Start to do migration
        logging.debug("Start to do migration")
        thread_timeout = self.migrate_thread_timeout
        self.obj_migration.do_migration(self.vms,
                                        self.src_uri,
                                        self.dest_uri,
                                        "orderly",
                                        options=self.virsh_migrate_options,
                                        thread_timeout=thread_timeout,
                                        ignore_status=True,
                                        virsh_uri=self.src_uri,
                                        func=func,
                                        shell=True)

        logging.info("Check migration result: succeed or"
                     " fail with expected error")
        self.obj_migration.check_result(self.obj_migration.ret, self.params)

        # Check "suspended post-copy" event after postcopy migration
        if self.migrate_flags & VIR_MIGRATE_POSTCOPY:
            logging.debug("Check event after postcopy migration")
            virsh_session.send_ctrl("^c")
            events_output = virsh_session.get_stripped_output()
            logging.debug("Events_output are %s", events_output)
            pattern = "Suspended Post-copy"
            if pattern not in events_output:
                self.test.error("Migration didn't switch to postcopy mode")

        logging.debug("Do post migration check after migrate to dest")
        self.params["migrate_options"] = self.virsh_migrate_options
        self.obj_migration.post_migration_check(self.vms,
                                                self.params,
                                                self.uptime,
                                                uri=self.dest_uri)
        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest does not paused, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    raise error.TestFail(
                        "Failed to get expect result, get %s" %
                        output.stdout.strip())
            else:
                raise error.TestFail("Fail to get domblkerror info:%s" %
                                     output.stderr)
    finally:
        logging.info("Do clean steps")
        if error_type == "unspecified error":
            nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            utils.run("umount %s" % nfs_dir)
            utils_selinux.set_status(selinux_bak)
        elif error_type == "no space":
            vm.destroy()
            _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Save VM and check the context.
    (4).Restore VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_save_restore_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_save_restore_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_save_restore_vm_sec_model", "selinux")
    sec_label = params.get("svirt_save_restore_vm_sec_label", None)
    sec_relabel = params.get("svirt_save_restore_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_save_restore_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    # Init a path to save VM.
    save_path = os.path.join(data_dir.get_tmp_dir(), "svirt_save")
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            vm.save_to_file(path=save_path)
            vm.restore_from_file(path=save_path)
            # Save and restore VM successfully.
            if status_error:
                test.fail("Test succeeded in negative case.")
        except virt_vm.VMError as e:
            if not status_error:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                if str(e).count("getfd"):
                    error_msg += ("More info pleass refer to"
                                  " https://bugzilla.redhat.com/show_bug.cgi?id=976632")
                test.fail(error_msg)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Exemple #32
0
def run(test, params, env):
    """
    Test svirt in virt-install.

    (1). Init variables.
    (2). Set selinux on host.
    (3). Set label of image.
    (4). run a virt-install command.
    (5). clean up.

    As this test only cares whether the qemu-kvm process
    can access the image, it is not necessary to install
    a full OS in the VM. Verifying that the VM is alive
    after the virt-install command is enough; this saves a
    lot of time and makes the test independent of unattended_install.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_install_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_install_vm_sec_model", "selinux")
    sec_label = params.get("svirt_install_vm_sec_label", None)
    sec_relabel = params.get("svirt_install_vm_sec_relabel", "yes")

    # Set selinux status on host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    # Set the image label.
    disk_label = params.get("svirt_install_disk_label", None)
    vm_name = params.get("main_vm", None)
    # svirt will prevent access via a symbolic link.
    data_path = data_dir.get_data_dir()
    real_data_path = os.path.realpath(data_path)
    image_path = os.path.join(real_data_path, "svirt_image")
    if virsh.domain_exists(vm_name):
        virsh.remove_domain(vm_name)
    if not os.path.exists(image_path):
        utils_test.libvirt.create_local_disk("file", path=image_path)

    try:
        utils_selinux.set_context_of_file(image_path, disk_label)
        cmd = "virt-install --name %s --import --disk" % vm_name
        cmd += " path=%s --ram '1024' " % image_path
        cmd += " --security"
        if sec_type == 'static':
            if sec_label is None:
                raise ValueError("Seclabel is not setted for static.")
            cmd += " type=static,label=%s" % (sec_label)
        elif sec_type == 'dynamic':
            cmd += " type=dynamic"
        else:
            raise ValueError("Security type %s is not supported." % sec_type)
        if sec_relabel is not None:
            cmd += ",relabel=%s" % sec_relabel

        cmd += " --noautoconsole --graphics vnc --video vga &"
        utils.run(cmd, ignore_status=True)

        def _vm_alive():
            return virsh.is_alive(vm_name)

        if (utils_misc.wait_for(_vm_alive, timeout=5)):
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        else:
            if not status_error:
                raise error.TestFail("Test failed in positive case.")
    finally:
        # cleanup
        utils_selinux.set_status(backup_sestatus)
        if virsh.domain_exists(vm_name):
            virsh.remove_domain(vm_name)
        if os.path.exists(image_path):
            utils_test.libvirt.delete_local_disk("file", path=image_path)
Exemple #33
0
def run(test, params, env):
    """
    Test svirt in virt-install.

    (1). Init variables.
    (2). Set selinux on host.
    (3). Set label of image.
    (4). run a virt-install command.
    (5). clean up.

    As this test only cares whether the qemu-kvm process
    can access the image, it is not necessary to install
    a full OS in the VM. Verifying that the VM is alive
    after the virt-install command is enough; this saves a
    lot of time and makes the test independent of unattended_install.
    """
    # Get general variables.
    status_error = "yes" == params.get("status_error", "no")
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_install_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_install_vm_sec_model", "selinux")
    sec_label = params.get("svirt_install_vm_sec_label", None)
    sec_relabel = params.get("svirt_install_vm_sec_relabel", "yes")

    # Set selinux status on host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    # Set the image label.
    disk_label = params.get("svirt_install_disk_label", None)
    vm_name = params.get("main_vm", None)
    # svirt will prevent access via a symbolic link.
    data_path = data_dir.get_data_dir()
    real_data_path = os.path.realpath(data_path)
    image_path = os.path.join(real_data_path, "svirt_image")
    if virsh.domain_exists(vm_name):
        virsh.remove_domain(vm_name)
    if not os.path.exists(image_path):
        utils_test.libvirt.create_local_disk("file", path=image_path)

    try:
        utils_selinux.set_context_of_file(image_path, disk_label)
        cmd = "virt-install --name %s --import --disk path=%s --ram '1024' " % (vm_name, image_path)
        cmd += " --security"
        if sec_type == "static":
            if sec_label is None:
                raise ValueError("Seclabel is not setted for static.")
            cmd += " type=static,label=%s" % (sec_label)
        elif sec_type == "dynamic":
            cmd += " type=dynamic"
        else:
            raise ValueError("Security type %s is not supported." % sec_type)
        if sec_relabel is not None:
            cmd += ",relabel=%s" % sec_relabel

        cmd += "&"
        utils.run(cmd, ignore_status=True)

        def _vm_alive():
            return virsh.is_alive(vm_name)

        if utils_misc.wait_for(_vm_alive, timeout=5):
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
        else:
            if not status_error:
                raise error.TestFail("Test failed in positive case.")
    finally:
        # cleanup
        utils_selinux.set_status(backup_sestatus)
        if virsh.domain_exists(vm_name):
            virsh.remove_domain(vm_name)
        if os.path.exists(image_path):
            utils_test.libvirt.delete_local_disk("file", path=image_path)
Exemple #34
0
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk to qemu:qemu to avoid fail on local disk
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create a image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = os.path.basename(server_img_path)
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestNAError("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
Exemple #35
0
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
        else:
            target_ip = target_ip
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port,
                               uri_path)
    test_dict["desuri"] = uri

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Verify SSH login to the remote server.
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-direction migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Record initial SELinux modes so they can be restored afterwards.
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforce: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text.strip()
        logging.info("previous remote enforce: %s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode when testing "
                         "ceph migration")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.error("Failed to set remote SELinux in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name,
                           disk_format,
                           host_ip,
                           disk_src_protocol,
                           vol_name,
                           disk_image,
                           auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                logging.info("Failed to start VM")
                test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure VM can be cleaned up on remote host even migrating fail.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on remote host if testing involve in ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R  %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R  %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
Exemple #36
0
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None

        if not os.path.isfile(self.emulated_image):
            utils.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) // 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove; rebuilding is fine
                utils.system(self.create_cmd)
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")

            # Set selinux to permissive mode to make sure iscsi target
            # export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")

            output = utils.system_output(cmd)
            used_id = re.findall("Target\s+(\d+)", output)
            emulated_id = 1
            while str(emulated_id) in used_id:
                emulated_id += 1
            self.emulated_id = str(emulated_id)
            cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
            cmd += " --lld iscsi --targetname %s" % self.target
            utils.system(cmd)
            cmd = "tgtadm --lld iscsi --op bind --mode target "
            cmd += "--tid %s -I ALL" % self.emulated_id
            utils.system(cmd)
        else:
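            # The target is already exported; parse its existing target ID
            # from the 'tgtadm ... show' output and reuse it.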
            target_strs = re.findall("Target\s+(\d+):\s+%s$" %
                                     self.target, output, re.M)
            self.emulated_id = target_strs[0].split(':')[0].split()[-1]

        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:   # In case service stopped
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)

        # Create a LUN with emulated image
        if re.findall(self.emulated_image, output, re.M):
            # Exist already
            logging.debug("Exported image already exists.")
            self.export_flag = True
        else:
            tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
                                output, re.DOTALL)
            if tgt_str:
                luns = len(re.findall("\s+LUN:\s(\d+)",
                                      tgt_str.group(1), re.M))
            else:
                luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
            cmd = "tgtadm --mode logicalunit --op new "
            cmd += "--tid %s --lld iscsi " % self.emulated_id
            cmd += "--lun %s " % luns
            cmd += "--backing-store %s" % self.emulated_image
            utils.system(cmd)
            self.export_flag = True

        # Restore selinux
        if selinux_mode is not None:
            utils_selinux.set_status(selinux_mode)

        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
Exemple #37
0
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None

        # create image disk
        if not os.path.isfile(self.emulated_image):
            utils.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) // 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove; rebuilding is fine
                utils.system(self.create_cmd)

        # confirm if the target exists and create iSCSI target
        cmd = "targetcli ls /iscsi 1"
        output = utils.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")

            # Set selinux to permissive mode to make sure
            # iscsi target export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")

            # In fact, we've got two options here:
            #
            # 1) Create a block backstore, which usually provides the best
            #    performance. We can use a block device like /dev/sdb or
            #    a logical volume previously created
            #    (lvcreate -n lv_iscsi -L 1G vg).
            # 2) Create a fileio backstore,
            #    which enables the local file system cache.
            #
            # This class only works for emulated iscsi devices,
            # so a fileio backstore is enough and safe.
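            # A rough sketch of option 1, assuming a spare block device
            # ('/dev/sdb' and the backstore name 'disk01' are hypothetical):
            #   targetcli /backstores/block/ create disk01 /dev/sdb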

            # Create a fileio backstore
            device_cmd = ("targetcli /backstores/fileio/ create %s %s" %
                          (self.device, self.emulated_image))
            output = utils.system_output(device_cmd)
            if "Created fileio" not in output:
                raise error.TestFail("Failed to create fileio %s. (%s)",
                                     self.device, output)

            # Create an IQN with a target named target_name
            target_cmd = "targetcli /iscsi/ create %s" % self.target
            output = utils.system_output(target_cmd)
            if "Created target" not in output:
                raise error.TestFail("Failed to create target %s. (%s)",
                                     self.target, output)

            check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target
            if "0.0.0.0:3260" not in utils.system_output(check_portal):
                # Create portal
                # 0.0.0.0 means binding to INADDR_ANY
                # and using default IP port 3260
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s"
                              % (self.target, "0.0.0.0"))
                output = utils.system_output(portal_cmd)
                if "Created network portal" not in output:
                    raise error.TestFail("Failed to create portal. (%s)",
                                         output)

            # Create lun
            lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target
            dev_cmd = "create /backstores/fileio/%s" % self.device
            output = utils.system_output(lun_cmd + dev_cmd)
            if "Created LUN" not in output:
                raise error.TestFail("Failed to create lun. (%s)",
                                     output)

            # Set firewall if it's enabled
            output = utils.system_output("firewall-cmd --state",
                                         ignore_status=True)
            if re.findall("^running", output, re.M):
                # firewall is running
                utils.system("firewall-cmd --permanent --add-port=3260/tcp")
                utils.system("firewall-cmd --reload")

            # Restore selinux
            if selinux_mode is not None:
                utils_selinux.set_status(selinux_mode)

            self.export_flag = True
        else:
            logging.info("Target %s has already existed!" % self.target)

        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
        else:
            # To enable that so-called "demo mode" TPG operation,
            # disable all authentication for the corresponding Endpoint.
            # which means grant access to all initiators,
            # so that they can access all LUNs in the TPG
            # without further authentication.
            auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
            attr_cmd = ("set attribute %s %s %s %s" %
                        ("authentication=0",
                         "demo_mode_write_protect=0",
                         "generate_node_acls=1",
                         "cache_dynamic_acls=1"))
            output = utils.system_output(auth_cmd + attr_cmd)
            logging.info("Define access rights: %s" % output)

        # Save configuration
        utils.system("targetcli / saveconfig")
Exemple #38
0
def run(test, params, env):
    """
    Test DAC in save/restore domain to nfs pool.

    (1).Init variables for test.
    (2).Create nfs pool
    (3).Start VM and check result.
    (4).Save domain to the nfs pool.
    (5).Restore domain from the nfs file.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_save_restore_host_selinux",
                               "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool file variables
    pre_file = "yes" == params.get("pre_file", "yes")
    pre_file_name = params.get("pre_file_name", "dac_nfs_file")
    file_tup = ("file_user", "file_group", "file_mode")
    file_val = []
    for i in file_tup:
        try:
            file_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    file_user, file_group, file_mode = file_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if vm.is_alive():
        vm.destroy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk mode to avoid fail on local disk
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for save/restore
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     emulated_image,
                     image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # Set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Create a file on nfs server dir.
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        server_file_path = os.path.join(nfs_path, pre_file_name)
        if pre_file and not os.path.exists(server_file_path):
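            # Equivalent of 'touch': create an empty file without
            # truncating an existing one.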
            open(server_file_path, 'a').close()
        if not pre_file and os.path.exists(server_file_path):
            raise error.TestNAError("File %s already exist in pool %s" %
                                    (server_file_path, pool_name))

        # Get nfs mount file path
        mnt_path = os.path.join(tmp_dir, pool_target)
        mnt_file_path = os.path.join(mnt_path, pre_file_name)

        # Change img ownership and mode on nfs server dir
        if pre_file:
            os.chown(server_file_path, file_user, file_group)
            os.chmod(server_file_path, file_mode)

        # Start VM.
        try:
            vm.start()
            # Start VM successfully.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            raise error.TestFail("Domain failed to start. " "error: %s" % e)

        label_before = check_ownership(server_file_path)
        if label_before:
            logging.debug("file ownership on nfs server before save: %s" %
                          label_before)

        # Save domain to nfs pool file
        save_re = virsh.save(vm_name, mnt_file_path, debug=True)
        if save_re.exit_status:
            if not status_error:
                raise error.TestFail("Failed to save domain to nfs pool file.")
        else:
            if status_error:
                raise error.TestFail("Save domain to nfs pool file succeeded, "
                                     "expected Fail.")

        label_after = check_ownership(server_file_path)
        if label_after:
            logging.debug("file ownership on nfs server after save: %s" %
                          label_after)

        # Restore domain from the nfs pool file
        if not save_re.exit_status:
            restore_re = virsh.restore(mnt_file_path, debug=True)
            if restore_re.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to restore domain from nfs "
                                         "pool file.")
            else:
                if status_error:
                    raise error.TestFail("Restore domain from nfs pool file "
                                         "succeeded, expected Fail.")

            label_after_rs = check_ownership(server_file_path)
            if label_after_rs:
                logging.debug(
                    "file ownership on nfs server after restore: %s" %
                    label_after_rs)
Exemple #39
0
def run(test, params, env):
    """
    Test svirt in undefine/define of VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Undefine and re-define the VM.
    (4).Clean up and restore the environment.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get(
        "svirt_undefine_define_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_undefine_define_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_undefine_define_vm_sec_model", "selinux")
    sec_label = params.get("svirt_undefine_define_vm_sec_label", None)
    sec_relabel = params.get("svirt_undefine_define_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_undefine_define_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        xml_file = (os.path.join(data_dir.get_tmp_dir(), "vmxml"))
        if vm.is_alive():
            vm.destroy()
        virsh.dumpxml(vm.name, to_file=xml_file)
        cmd_result = virsh.undefine(vm.name)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to undefine vm."
                                 "Detail: %s" % cmd_result)
        cmd_result = virsh.define(xml_file)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to define vm."
                                 "Detail: %s" % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Exemple #40
0
def run(test, params, env):
    """
    Test svirt in start/destroy of VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []
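    # Build one seclabel dict per security model so the domain XML can
    # carry several <seclabel> elements (e.g. when sec_model is
    # "selinux,dac").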
    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                sec_dict['model'] = model
                if sec_type != "none":
                    sec_dict['label'] = sec_label
                sec_dict_copy = sec_dict.copy()
                sec_dict_list.append(sec_dict_copy)
        else:
            sec_dict['model'] = sec_model
            if sec_type != "none":
                sec_dict['label'] = sec_label
            sec_dict_list.append(sec_dict)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
                             "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
        os.close(f)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Set disk label
        for disk in disks.values():
            disk_path = disk['source']
            utils_selinux.set_context_of_file(filename=disk_path,
                                              context=img_label)
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined or
                security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (disk_context != img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                if disk_context != imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM.\nDetail: disk_context="
                                         "%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if img_label_after != img_label:
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined or
                security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
Exemple #41
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    output_storage = params.get('output_storage', 'default')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    skip_check = 'yes' == params.get('skip_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    multi_kernel_list = ['multi_kernel', 'debug_kernel']
    backup_list = [
        'virtio_on', 'virtio_off', 'floppy', 'floppy_devmap', 'fstab_cdrom',
        'fstab_virtio', 'multi_disks', 'sata_disk', 'network_virtio',
        'network_rtl8139', 'network_e1000', 'multi_netcards', 'spice',
        'spice_encrypt', 'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus',
        'blank_2nd_disk', 'listen_none', 'listen_socket', 'only_net', 'only_br'
    ]
    error_list = []

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """
        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                logging.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        logging.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            logging.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        logging.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            logging.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    @vm_shell
    def check_vmlinuz_initramfs(v2v_result):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        logging.info('Checking if vmlinuz matches initramfs')
        kernels = re.search(
            r'kernel packages in this guest:.*?(\(kernel.*?\).*?){2,}',
            v2v_result, re.DOTALL)
        try:
            lines = kernels.group(0)
            kernel_list = re.findall(r'\((.*?)\)', lines)
            for kernel in kernel_list:
                vmlinuz = re.search(r'/boot/vmlinuz-(.*?),', kernel).group(1)
                initramfs = \
                    re.search(r'/boot/initramfs-(.*?)\.img', kernel).group(1)
                logging.debug('vmlinuz version is: %s' % vmlinuz)
                logging.debug('initramfs version is: %s' % initramfs)
                if vmlinuz != initramfs:
                    log_fail('vmlinuz does not match initramfs')
        except Exception as e:
            test.error('Error finding kernel info:\n%s' % str(e))

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        current_kernel = vmcheck.session.cmd('uname -r').strip()
        logging.debug('Current kernel: %s' % current_kernel)
        if current_kernel == '3.10.0-799.el7.x86_64':
            logging.debug('The kernel is the latest kernel')
        else:
            log_fail('VM should choose the latest kernel, not %s'
                     % current_kernel)

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        logging.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': type,
            'targetbus': bus[type]
        }
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
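        # Map bus type to device-name prefix: ide -> hdX, sata -> sdX,
        # virtio -> vdX.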
        logging.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        logging.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, virsh_instance):
        """
        Check if number and type of network cards meet expectation
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        iflist = vmxml.get_iface_all()
        logging.debug('MAC list before v2v: %s' % mac_list)
        logging.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not converted to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'virtio', 'sr0', 'invalid']
        if type not in type_list:
            test.error('Type %s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            logging.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL', 'virtio': '/vd'}
            logging.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify virtio device
                elif type == 'virtio':
                    entry = session.cmd('cat /etc/fstab|grep /boot').strip()
                    # Get the ID (no matter what, usually UUID)
                    origin = entry.split()[0]
                    key = origin.split('=')[1]
                    blkinfo = session.cmd('blkid|grep %s' % key).strip()
                    # Replace with virtio disk path
                    replace = blkinfo.split()[0].strip(':')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        logging.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so the free space of root is less than $left_space MB
        """
        cmd_df = "df -m / --output=avail"
        df_output = session.cmd(cmd_df).strip()
        logging.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        logging.info('Available space: %dM' % avail)
        if avail > left_space - 1:
            tmp_dir = data_dir.get_tmp_dir()
            if session.cmd_status('ls %s' % tmp_dir) != 0:
                session.cmd('mkdir %s' % tmp_dir)
            large_file = os.path.join(tmp_dir, 'file.large')
            cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                         (large_file, avail - left_space + 2)
            session.cmd(cmd_create, timeout=v2v_timeout)
        logging.info('Available space: %sM' % session.cmd(cmd_df).strip())

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* exist, remove them, then touch _db.001 to corrupt db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Failed to corrupt rpm database')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux stat of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        logging.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            logging.info('Set selinux stat with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        logging.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        logging.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on the guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            logging.info('Send command "%s"', cmd)
            status, output = session.cmd_status_output(cmd)
            logging.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        logging.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        logging.info('Check time drift')
        output = vmcheck.session.cmd('ntpdate -q %s' % ntp_server)
        logging.debug(output)
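        # The last line of 'ntpdate -q' ends with '... offset <seconds> sec',
        # so the offset is the second-to-last field of the output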
        drift = abs(float(output.split()[-2]))
        logging.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            logging.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            logging.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Boot up guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if skip_check:
            logging.info('Skip checking vm after conversion')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=vmchecker.virsh_instance)
            logging.debug(vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
            if checkpoint == 'floppy':
                check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'],
                                     vmchecker.virsh_instance)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmxml.get_devices('video')[0].model_type
                    if video_type.lower() != 'qxl':
                        log_fail('Video expect QXL, actual %s' % video_type)
            if checkpoint.startswith('listen'):
                listen_type = vmxml.get_devices('graphics')[0].listen_type
                logging.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                logging.info('Selinux status after v2v:%s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'kvm',
            'v2v_opts': '-v -x',
            'storage': output_storage,
            'network': network,
            'bridge': bridge,
            'target': target,
            'main_vm': vm_name,
            'input_mode': 'libvirt',
            'new_name': vm_name + '_' + utils_misc.generate_random_string(3)
        }
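        # 'new_name' gets a random suffix so the converted guest does not
        # clash with the source domain's name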
        if output_format:
            v2v_params.update({'output_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['storage'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
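        # The 'direct' backend makes libguestfs launch its appliance directly
        # instead of going through libvirtd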

        backup_xml = None
        if checkpoint in backup_list:
            backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if checkpoint == 'multi_disks':
            attach_disk_path = os.path.join(data_dir.get_tmp_dir(),
                                            'attach_disks')
            utlv.attach_disks(env.get_vm(vm_name), attach_disk_path, None,
                              params)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disk_count = 0
            for disk in list(new_xml.get_disk_all().values()):
                if disk.get('device') == 'disk':
                    disk_count += 1
            params['ori_disks'] = disk_count
        if checkpoint == 'virtio_on':
            change_disk_bus('virtio')
        if checkpoint == 'virtio_off':
            change_disk_bus('ide')
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            if checkpoint == 'fstab_virtio':
                change_disk_bus('virtio')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint == 'no_space':

            @vm_shell
            def take_space(**kwargs):
                create_large_file(kwargs['session'], 20)

            take_space()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            create_large_file(session, 1000)
            if checkpoint == 'host_no_space_setcache':
                logging.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            attach_network_card('virtio')
            attach_network_card('e1000')
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') == 'network':
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interfaces')
            logging.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                logging.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    logging.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            logging.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            logging.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            logging.info('Set service ntpd on')
            cmd = ['yum -y install ntp', 'systemctl start ntpd']
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            logging.info('Sync time with %s', ntp_server)
            cmd = ['yum -y install ntpdate', 'ntpdate %s' % ntp_server]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            logging.info('Create blank disk %s', disk_path)
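            # 'truncate -s' creates a sparse file, so this does not actually
            # consume 1G on the host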
            process.run('truncate -s 1G %s' % disk_path)
            logging.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            logging.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            logging.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            logging.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            logging.info('Attach network')
            virsh.attach_interface(vm_name,
                                   'network default --current',
                                   debug=True)
            v2v_params.pop('bridge')
        if checkpoint == 'only_br':
            logging.info('Attach bridge')
            virsh.attach_interface(vm_name,
                                   'bridge virbr0 --current',
                                   debug=True)
            v2v_params.pop('network')
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            logging.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not using a file-based image')
        virsh.dumpxml(vm_name, debug=True)
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large')
            if os.path.isfile(large_file):
                os.remove(large_file)
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail; more info in bug 1165485
                    logging.debug("The guest failed to start as expected, "
                                  "details in bug: bugzilla.redhat.com/show_bug.cgi"
                                  "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1, 0) and disk_type == "block":
                    output = to_text(process.system_output(
                        "nsenter -t %d -m -- ls -l %s" % (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
def run(test, params, env):
    """
    Test storage pool and volumes with applications such as:
    install vms, attached to vms...
    """
    pool_type = params.get("pool_type")
    pool_name = "test_%s_app" % pool_type
    pool_target = params.get("pool_target")
    emulated_img = params.get("emulated_image", "emulated-image")
    volume_count = int(params.get("volume_count", 1))
    volume_size = params.get("volume_size", "1G")
    emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1)
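    # Size the emulated backing image to hold every volume plus 1G of headroom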
    application = params.get("application", "install")
    disk_target = params.get("disk_target", "vdb")
    test_message = params.get("test_message", "")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    block_device = params.get("block_device", "/DEV/EXAMPLE")
    if application == "install":
        cdrom_path = os.path.join(data_dir.get_data_dir(),
                                  params.get("cdrom_cd1"))
        if not os.path.exists(cdrom_path):
            raise error.TestNAError("Can't find installation cdrom:%s"
                                    % cdrom_path)
        # Get a nonexistent domain name
        vm_name = "vol_install_test"

    try:
        pvtest = utlv.PoolVolumeTest(test, params)
        pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                        image_size=emulated_size, pre_disk_vol=[volume_size],
                        device_name=block_device)

        logging.debug("Current pools:\n%s",
                      libvirt_storage.StoragePool().list_pools())

        new_pool = libvirt_storage.PoolVolume(pool_name)
        if pool_type == "disk":
            volumes = new_pool.list_volumes()
            logging.debug("Current volumes:%s", volumes)
        else:
            volumes = create_volumes(new_pool, volume_count, volume_size)
        if application == "attach":
            vm = env.get_vm(vm_name)
            session = vm.wait_for_login()
            virsh.attach_disk(vm_name, list(volumes.values())[volume_count - 1],
                              disk_target)
            vm_attach_device = "/dev/%s" % disk_target
            if session.cmd_status("which parted"):
                # No parted command, check device only
                if session.cmd_status("ls %s" % vm_attach_device):
                    raise error.TestFail("Didn't find attached device:%s"
                                         % vm_attach_device)
                return
            # Test if attached disk can be used normally
            utlv.mk_part(vm_attach_device, session=session)
            session.cmd("mkfs.ext4 %s1" % vm_attach_device)
            session.cmd("mount %s1 /mnt" % vm_attach_device)
            session.cmd("echo %s > /mnt/test" % test_message)
            output = session.cmd_output("cat /mnt/test").strip()
            if output != test_message:
                raise error.TestFail("%s cannot be used normally!"
                                     % vm_attach_device)
        elif application == "install":
            # Get a nonexistent domain name anyway
            while virsh.domain_exists(vm_name):
                vm_name += "_test"
            # Prepare installation parameters
            params["main_vm"] = vm_name
            vm = env.create_vm("libvirt", None, vm_name, params,
                               test.bindir)
            env.register_vm(vm_name, vm)
            params["image_name"] = volumes.values()[volume_count - 1]
            params["image_format"] = "raw"
            params['force_create_image'] = "yes"
            params['remove_image'] = "yes"
            params['shutdown_cleanly'] = "yes"
            params['shutdown_cleanly_timeout'] = 120
            params['guest_port_unattended_install'] = 12323
            params['inactivity_watcher'] = "error"
            params['inactivity_treshold'] = 1800
            params['image_verify_bootable'] = "no"
            params['unattended_delivery_method'] = "cdrom"
            params['drive_index_unattended'] = 1
            params['drive_index_cd1'] = 2
            params['boot_once'] = "d"
            params['medium'] = "cdrom"
            params['wait_no_ack'] = "yes"
            params['image_raw_device'] = "yes"
            params['backup_image_before_testing'] = "no"
            params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                       "console=ttyS0,115200 console=tty0")
            params['cdroms'] = "unattended cd1"
            params['redirs'] += " unattended_install"
            selinux_mode = None
            try:
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
                try:
                    unattended_install.run(test, params, env)
                except process.CmdError as detail:
                    raise error.TestFail("Guest install failed:%s" % detail)
            finally:
                if selinux_mode is not None:
                    utils_selinux.set_status(selinux_mode)
                env.unregister_vm(vm_name)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
            elif application == "attach":
                virsh.detach_disk(vm_name, disk_target)
        finally:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img,
                                device_name=block_device)
def run(test, params, env):
    """
    Test svirt when starting and destroying a VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
                             "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Back up disk labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
        os.close(f)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
        label1s = label1.split(":")
        label2s = label2.split(":")
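        # Compare only the fields present in label1; a trailing MCS range on
        # label2 is thereby ignored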
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in disks.values():
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            o_r = utils_selinux.verify_defcon(pathname=disk_path,
                                              readonly=False,
                                              forcedesc=True)
            orig_label_type = backup_labels_of_disks[disk_path].split(":")[2]
            if o_r and (orig_label_type != img_label_type):
                raise error.TestFail("change disk label(%s) failed" % img_label_type)
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined or
                security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # The disk context is 'system_u:object_r:svirt_image_t:s0';
                # when the VM starts, the MLS/MCS range is appended automatically,
                # so imagelabel becomes 'system_u:object_r:svirt_image_t:s0:cxx,cxxx'.
                # The MCS range should not be part of the comparison.
                if not _check_label_equal(disk_context, imagelabel):
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetal: disk_context="
                                         "%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546

                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined or
                security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test svirt when starting and destroying a VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()

    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test successed in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail(
                    "Label of VM is not expected after starting.\n"
                    "Detail: vm_context=%s, sec_label=%s" %
                    (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s." %
                                     (disk_context, img_label))
            if sec_relabel == "yes":
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail(
                        "Label of disk is not relabeled by VM\n"
                        "Detal: disk_context=%s, imagelabel=%s" %
                        (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            vm.destroy()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (not img_label_after == img_label):
                raise error.TestFail(
                    "Bug: Label of disk is not restored in VM "
                    "shuting down.\n"
                    "Detail: img_label_after=%s, "
                    "img_label_before=%s.\n" % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Exemple #46
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    storage = params.get('output_storage')
    storage_name = params.get('storage_name')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = params.get('v2v_opts', '-v -x')
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_check = 'yes' == params.get('skip_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom',
                   'sata_disk', 'network_rtl8139', 'network_e1000',
                   'spice', 'spice_encrypt', 'spice_qxl',
                   'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
                   'listen_none', 'listen_socket', 'only_net', 'only_br']
    error_list = []

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri,
                       'remote_ip': source_ip,
                       'remote_user': source_user,
                       'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """

        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                logging.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        logging.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            logging.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        logging.debug("Found %s disks in VM after conversion", disks)
        if disks == expected_disks:
            logging.info("Disk count is as expected")
        else:
            log_fail("Disk count is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        logging.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?/boot/config){1,}', v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Kernel information not found")

        # Remove duplicate items by set
        logging.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i)
            logging.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should be same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions does not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd('rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines()
        logging.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error('Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        # Slice off the 'kernel-' prefix (lstrip would strip a character set)
        if current_kernel.strip() != kernel_normal_list[-1][len('kernel-'):]:
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        logging.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
                'type': type, 'targetbus': bus[type]}
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        logging.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
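            # Device names become hda/sda/vda, hdb/sdb/vdb, ... per the chosen bus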
            target.set('dev', dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        logging.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, virsh_instance):
        """
        Check if number and type of network cards meet expectation
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        iflist = vmxml.get_iface_all()
        logging.debug('MAC list before v2v: %s' % mac_list)
        logging.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network interface not converted to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {'root': 'e2label %s ROOT',
                   'swap': 'swaplabel -L SWAPPER %s'}
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('%s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            logging.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM',
                'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            logging.info('fstab entry type: %s', type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        logging.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the free space of root is less than $left_space MB
        """
        cmd_df = "df -m / --output=avail"
        df_output = session.cmd(cmd_df).strip()
        logging.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        logging.info('Available space: %dM' % avail)
        if avail > left_space - 1:
            tmp_dir = data_dir.get_tmp_dir()
            if session.cmd_status('ls %s' % tmp_dir) != 0:
                session.cmd('mkdir %s' % tmp_dir)
            large_file = os.path.join(tmp_dir, 'file.large')
            cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                         (large_file, avail - left_space + 2)
            session.cmd(cmd_create, timeout=v2v_timeout)
        logging.info('Available space: %sM' % session.cmd(cmd_df).strip())
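
    # Sizing sketch (hypothetical numbers): with avail == 500 and
    # left_space == 20, dd writes 500 - 20 + 2 == 482 one-MB blocks,
    # leaving about 18M free, i.e. just under the requested threshold.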

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* exist, remove them, then touch __db.001 to corrupt the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Failed to corrupt rpmdb')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux status of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        logging.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            logging.info('Set selinux status with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST; 3min 48s ago
        firewalld_status = session.cmd('systemctl status firewalld.service|grep Active:',
                                       ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)', firewalld_status).group()
        logging.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status
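
    # Regex sketch: on the example line above, the search
    #     re.search(r'Active:\s\w*\s\(\w*\)', 'Active: active (running) ...')
    # returns the match 'Active: active (running)', dropping the volatile
    # timestamp so the value is stable across VM restarts.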

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd('systemctl status '
                                               'firewalld.service|grep Active:',
                                               ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)', firewalld_status).group()
        logging.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            logging.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2 minutes to sync the clock;
            # a timeout of 300s has no side-effects for the other commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            logging.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        logging.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        logging.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        logging.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        logging.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')
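
    # Parsing sketch (made-up output): a 'chronyc tracking' line such as
    #     Last offset     : +0.000134 seconds
    # yields lst_offset == '+0.000134', so drift == 0.000134, far below
    # the 3-second failure threshold.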

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            logging.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            logging.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if skip_check:
            logging.info('Skip checking vm after conversion')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=vmchecker.virsh_instance)
            logging.debug(vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv removes all removable devices (floppy, cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'],
                                     vmchecker.virsh_instance)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmxml.get_devices('video')[0].model_type
                    if video_type.lower() != 'qxl':
                        log_fail('Video expect QXL, actual %s' % video_type)
            if checkpoint.startswith('listen'):
                listen_type = vmxml.get_devices('graphics')[0].listen_type
                logging.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                logging.info('Selinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'storage': storage,
            'hostname': source_ip,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3)}
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'output_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create different sasl_user name for different job
            params.update({'sasl_user': params.get("sasl_user") +
                           utils_misc.generate_random_string(3)})
            logging.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['storage'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Save original graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')

        backup_xml = None
        # Only kvm guest's xml needs to be backed up currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = 0
            for disk in list(new_xml.get_disk_all().values()):
                if disk.get('device') == 'disk':
                    disk_count += 1
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            logging.info('VM state: %s' %
                         virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint == 'no_space':
            @vm_shell
            def take_space(**kwargs):
                create_large_file(kwargs['session'], 20)
            take_space()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            create_large_file(session, 1000)
            if checkpoint == 'host_no_space_setcache':
                logging.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            logging.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {'type': 'spice',
                                'passwd': params.get('spice_passwd', 'redhat')}
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {'type': 'spice',
                                      'passwdValidTo': '1970-01-01T00:00:01'}
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                logging.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    logging.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find(
                        'devices').find('video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        [video.attrib.pop(attr_i) for attr_i in [
                            'ram', 'vgamem'] if attr_i in video.attrib]
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find(
                'devices').find('graphics').find('listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            logging.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            logging.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            logging.info('Set service chronyd on')
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            logging.info('Sync time with %s', ntp_server)
            cmd = ['yum -y install chrony',
                   'systemctl start chronyd',
                   'chronyc add server %s' % ntp_server,
                   'chronyc waitsync']
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            logging.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            logging.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            logging.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            logging.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            logging.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            logging.info('Attach network')
            virsh.attach_interface(
                vm_name, 'network default --current', debug=True)
            v2v_params.pop('bridge')
        if checkpoint == 'only_br':
            logging.info('Attach bridge')
            virsh.attach_interface(
                vm_name, 'bridge virbr0 --current', debug=True)
            v2v_params.pop('network')
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            logging.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not with file image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh:
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status('firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large')
            if os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)

def run(test, params, env):
    """
    Test svirt in undefining and defining VM.

    (1).Init variables for test.
    (2).Label the VM and disks with proper label.
    (3).Undefine the VM and check the result.
    (4).Define the VM again and check the result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_undefine_define_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_undefine_define_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_undefine_define_vm_sec_model", "selinux")
    sec_label = params.get("svirt_undefine_define_vm_sec_label", None)
    sec_relabel = params.get("svirt_undefine_define_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_undefine_define_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        xml_file = (os.path.join(data_dir.get_tmp_dir(), "vmxml"))
        if vm.is_alive():
            vm.destroy()
        virsh.dumpxml(vm.name, to_file=xml_file)
        cmd_result = virsh.undefine(vm.name)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to undefine vm."
                                 "Detail: %s" % cmd_result)
        cmd_result = virsh.define(xml_file)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to define vm."
                                 "Detail: %s" % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Exemple #48
0
def run(test, params, env):
    """
    convert specific kvm guest to rhev
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    remote_host = params.get('remote_host', 'EXAMPLE')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = '-v -x' if params.get(
        'v2v_debug', 'on') in ['on', 'force_on'] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    skip_vm_check = params.get('skip_vm_check', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    debug_kernel = 'debug_kernel' == checkpoint
    backup_list = [
        'floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk',
        'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt',
        'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk',
        'listen_none', 'listen_socket', 'only_net', 'only_br'
    ]
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(source_ip,
                                                              source_user,
                                                              source_pwd,
                                                              auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """
        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper
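
    # Usage sketch for the decorator above (hypothetical helper): decorated
    # functions receive a ready 'session' and 'vm' via kwargs; the VM is
    # booted beforehand and shut down afterwards automatically:
    #
    #     @vm_shell
    #     def touch_marker(**kwargs):
    #         kwargs['session'].cmd('touch /tmp/marker')
    #
    #     touch_marker()  # no explicit start/login/shutdown at the call site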

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}',
                                 v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Find all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)',
                                     str_i)
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should all
            # be the same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions do not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly perfect.
        # The last one is always the latest kernel version
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('kernel-debug package not found')
        all_kernel_list = (kernel_normal_list +
                           kernel_debug.strip().splitlines())
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]; strip
        # the 'kernel-' prefix with a slice, since lstrip() strips a set of
        # characters rather than a prefix.
        if current_kernel.strip() != kernel_normal_list[-1][len('kernel-'):]:
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(type, source, dev):
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': type,
            'targetbus': bus[type]
        }
        if type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)
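
    # Command-line sketch: for type == 'cdrom' the loop above assembles
    # roughly
    #     --driver qemu --subdriver raw --sourcetype file --type cdrom
    #     --targetbus ide --mode readonly --current
    # which is passed as the 'extra' options of 'virsh attach-disk'.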

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not supported')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()
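
    # Naming sketch: with dest == 'sata' the dev prefix is 's', so the
    # first two converted disks become 'sda' and 'sdb'; 'ide' would give
    # 'hda'/'hdb', and 'virtio' 'vda'/'vdb'.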

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not supported')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node

        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after  v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not convert to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0)     /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(type, **kwargs):
        """
        Specify entry in fstab file
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if type not in type_list:
            test.error('Type %s is not supported in fstab' % type)
        session = kwargs['session']
        # Specify cdrom device
        if type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(type)
            if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
                # Specify device by UUID
                if type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file so that the free space of the cache dir is less than $left_space MB
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
                     (large_file, avail - left_space + 2)
        session.cmd(cmd_create, timeout=v2v_timeout)
        newAvail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New Available space: %sM' % newAvail)
        return large_file
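
    # Note (assumption about the default): unlike the earlier variant that
    # checked '/', this version fills the libguestfs cache directory
    # reported by 'guestfish get-cachedir' (typically /var/tmp), since that
    # is the filesystem virt-v2v actually consumes during conversion.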

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* exist, remove them, then touch __db.001 to corrupt the db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        if not session.cmd_status('yum update'):
            test.error('Failed to corrupt rpmdb')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux status of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
            LOG.info('Set selinux status with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2 minutes to sync the clock;
            # a timeout of 300s has no side-effects for the other commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Converting to rhv removes all removable devices (floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'

                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v: %s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status does not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Selinux type does not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save original graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        backup_xml = None
        # Only kvm guest's xml needs to be backed up currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.',
                              params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        for attr_i in ('ram', 'vgamem'):
                            video.attrib.pop(attr_i, None)
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewalld service status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Turn chronyd service on')
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server
            ]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server, 'chronyc waitsync'
            ]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detach all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detach all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(vm_name,
                                   'network default --current',
                                   debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attach bridge')
            virsh.attach_interface(vm_name,
                                   'bridge virbr0 --current',
                                   debug=True)
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest disk is not a file-backed image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
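        # NOTE: the code below assumes a '_check_state' helper and a
        # 'timeout' value defined earlier in the (truncated) test; a minimal
        # sketch of the helper, given the pause expected on block error:
        #     def _check_state():
        #         return vm.state() == "paused"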
        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest does not paused, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    raise error.TestFail("Failed to get expect result, get %s"
                                         % output.stdout.strip())
            else:
                raise error.TestFail("Fail to get domblkerror info:%s" %
                                     output.stderr)
    finally:
        logging.info("Do clean steps")
        if error_type == "unspecified error":
            nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            utils.run("umount %s" % nfs_dir)
            utils_selinux.set_status(selinux_bak)
        elif error_type == "no space":
            vm.destroy()
            _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
Exemple #50
0
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        if snapshot_name:
            backup_xml.sync("--snapshots-metadata")
        else:
            backup_xml.sync()
        for i in disk_snap_path:
            if i and os.path.exists(i):
                os.unlink(i)
        for path, label in backup_labels_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail as detail:
                logging.error(str(detail))
        utils_selinux.set_status(backup_sestatus)
        libvirtd.restart()
def run(test, params, env):
    """
    Test DAC in save/restore domain to nfs pool.

    (1).Init variables for test.
    (2).Create nfs pool
    (3).Start VM and check result.
    (4).Save domain to the nfs pool.
    (5).Restore domain from the nfs file.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_save_restore_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool file variables
    pre_file = "yes" == params.get("pre_file", "yes")
    pre_file_name = params.get("pre_file_name", "dac_nfs_file")
    file_tup = ("file_user", "file_group", "file_mode")
    file_val = []
    for i in file_tup:
        try:
            file_val.append(int(params.get(i)))
        except ValueError:
            test.cancel("%s value '%s' is not a number." %
                        (i, params.get(i)))
    # False positive - file_val was filled in the for loop above.
    # pylint: disable=E0632
    file_user, file_group, file_mode = file_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
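        # fstat() yields the numeric uid:gid, which cleanup restores
        # via os.chown().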
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk mode to avoid fail on local disk
        for disk in list(disks.values()):
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for save/restore
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # Set virt_use_nfs
        result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
        if result.exit_status:
            test.cancel("Failed to set virt_use_nfs value")

        # Create a file on nfs server dir.
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        server_file_path = os.path.join(nfs_path, pre_file_name)
        if pre_file and not os.path.exists(server_file_path):
            open(server_file_path, 'a').close()
        if not pre_file and os.path.exists(server_file_path):
            test.cancel("File %s already exist in pool %s" %
                        (server_file_path, pool_name))

        # Get nfs mount file path
        mnt_path = os.path.join(tmp_dir, pool_target)
        mnt_file_path = os.path.join(mnt_path, pre_file_name)

        # Change img ownership and mode on nfs server dir
        if pre_file:
            os.chown(server_file_path, file_user, file_group)
            os.chmod(server_file_path, file_mode)

        # Start VM.
        try:
            vm.start()
            # Start VM successfully.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            test.fail("Domain failed to start. "
                      "error: %s" % e)

        label_before = check_ownership(server_file_path)
        if label_before:
            logging.debug("file ownership on nfs server before save: %s" %
                          label_before)

        # Save domain to nfs pool file
        save_re = virsh.save(vm_name, mnt_file_path, debug=True)
        if save_re.exit_status:
            if not status_error:
                test.fail("Failed to save domain to nfs pool file.")
        else:
            if status_error:
                test.fail("Save domain to nfs pool file succeeded, "
                          "expected Fail.")

        label_after = check_ownership(server_file_path)
        if label_after:
            logging.debug("file ownership on nfs server after save: %s"
                          % label_after)

        # Restore domain from the nfs pool file
        if not save_re.exit_status:
            restore_re = virsh.restore(mnt_file_path, debug=True)
            if restore_re.exit_status:
                if not status_error:
                    test.fail("Failed to restore domain from nfs "
                              "pool file.")
            else:
                if status_error:
                    test.fail("Restore domain from nfs pool file "
                              "succeeded, expected Fail.")

            label_after_rs = check_ownership(server_file_path)
            if label_after_rs:
                logging.debug("file ownership on nfs server after restore: %s"
                              % label_after_rs)

    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        utils_selinux.set_status(backup_sestatus)
        qemu_conf.restore()
        libvirtd.restart()
Exemple #52
0
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None

        if not os.path.isfile(self.emulated_image):
            process.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) // 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove; rebuilding is fine
                process.system(self.create_cmd)
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = decode_to_text(process.system_output(cmd))
        except process.CmdError:
            restart_tgtd()
            output = decode_to_text(process.system_output(cmd))
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")

            # Set selinux to permissive mode to make sure iscsi target
            # export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")

            output = decode_to_text(process.system_output(cmd))
            used_id = re.findall(r"Target\s+(\d+)", output)
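            # Pick the lowest target id that tgtd is not already using.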
            emulated_id = 1
            while str(emulated_id) in used_id:
                emulated_id += 1
            self.emulated_id = str(emulated_id)
            cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
            cmd += " --lld iscsi --targetname %s" % self.target
            process.system(cmd)
            cmd = "tgtadm --lld iscsi --op bind --mode target "
            cmd += "--tid %s -I ALL" % self.emulated_id
            process.system(cmd)
        else:
            target_strs = re.findall(r"Target\s+(\d+):\s+%s$" % self.target,
                                     output, re.M)
            self.emulated_id = target_strs[0].split(':')[0].split()[-1]

        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = decode_to_text(process.system_output(cmd))
        except process.CmdError:  # In case service stopped
            restart_tgtd()
            output = decode_to_text(process.system_output(cmd))

        # Create a LUN with emulated image
        if re.findall(self.emulated_image, output, re.M):
            # Exist already
            logging.debug("Exported image already exists.")
            self.export_flag = True
        else:
            tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
                                output, re.DOTALL)
            if tgt_str:
                luns = len(re.findall(r"\s+LUN:\s(\d+)", tgt_str.group(1),
                                      re.M))
            else:
                luns = len(re.findall(r"\s+LUN:\s(\d+)", output, re.M))
            cmd = "tgtadm --mode logicalunit --op new "
            cmd += "--tid %s --lld iscsi " % self.emulated_id
            cmd += "--lun %s " % luns
            cmd += "--backing-store %s" % self.emulated_image
            process.system(cmd)
            self.export_flag = True
            self.luns = luns

        # Restore selinux
        if selinux_mode is not None:
            utils_selinux.set_status(selinux_mode)

        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
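# A minimal usage sketch for the method above; the class name 'Iscsi' and its
# constructor signature are assumptions here, not shown by this snippet:
#     iscsi_dev = Iscsi({"emulated_image": "/tmp/emulated_iscsi",
#                        "target": "iqn.2001-01.com.virttest:emulated_iscsi.target",
#                        "image_size": "1G"})
#     iscsi_dev.export_target()  # create the backing file and export the LUN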
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, and set the
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
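            # Ensure the configured qemu group can write to the monitor
            # socket directory; remember the change so cleanup can undo it.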
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
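        # 'format_user_group_str' (defined elsewhere in this test module) is
        # assumed to map a user:group name pair to a numeric "uid:gid" string.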
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    raise error.TestNAError("The label or model not valid, "
                                            "check more info in bug: https://"
                                            "bugzilla.redhat.com/show_bug.cgi"
                                            "?id=1165485")
                else:
                    raise error.TestFail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                f = os.open(disks[disk_target]['source'], 0)
                stat_re = os.fstat(f)
                disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                os.close(f)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        raise error.TestFail("The disk label is not equal to "
                                             "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Save VM and check the context.
    (4).Restore VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_save_restore_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_save_restore_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_save_restore_vm_sec_model", "selinux")
    sec_label = params.get("svirt_save_restore_vm_sec_label", None)
    sec_relabel = params.get("svirt_save_restore_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_save_restore_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    # Init a path to save VM.
    save_path = os.path.join(data_dir.get_tmp_dir(), "svirt_save")
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            vm.save_to_file(path=save_path)
            vm.restore_from_file(path=save_path)
            # Save and restore VM successfully.
            if status_error:
                test.fail("Test succeeded in negative case.")
        except virt_vm.VMError as e:
            if not status_error:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                if str(e).count("getfd"):
                    error_msg += ("For more info please refer to"
                                  " https://bugzilla.redhat.com/show_bug.cgi?id=976632")
                test.fail(error_msg)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
def run_svirt_start_destroy(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test successed in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes":
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()['imagelabel']
                if disk_context != imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by VM.\n"
                                         "Detail: disk_context=%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            vm.destroy()
            img_label_after = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if img_label_after != img_label:
                raise error.TestFail("Bug: label of disk is not restored after "
                                     "VM shutdown.\n"
                                     "Detail: img_label_after=%s, "
                                     "img_label_before=%s.\n"
                                     % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)