Example 1
def run(test, params, env):
    """
    Test of libvirt SPICE related features.

    1) Block specified ports if required;
    2) Setup SPICE TLS certification if required;
    3) Setup graphics tag in VM;
    4) Try to start VM;
    5) Parse and check result with expected.
    6) Clean up environment.
    """
    # Since 3.1.0, '-1' is not a valid value for the VNC port while
    # autoport is disabled, which means the VM fails to start as expected.
    # So cancel this case, since an equivalent scenario is already covered
    # by negative_tests.vnc_only.no_autoport.port_-2
    if libvirt_version.version_compare(3, 1, 0) and (params.get("vnc_port")
                                                     == '-1'):
        test.cancel('Cancel this case, since it is an equivalence class test '
                    'with case negative_tests.vnc_only.no_autoport.port_-2')

    # Since 2.0.0, there are some changes for listen type and port:
    # 1. Two new listen types: 'none' and 'socket' (not covered in this test).
    #    With the 'none' listen type there is no listen address and the port
    #    is 0, so the listen_type must be reset on earlier versions to get
    #    the expected result (VM start failure).
    # 2. The SPICE port accepts negative numbers less than -1.
    #    If the SPICE port is less than -1, the VM can start normally, but
    #    both the listen address and port will be omitted.
    spice_listen_type = params.get('spice_listen_type', 'not_set')
    vnc_listen_type = params.get('vnc_listen_type', 'not_set')
    spice_port = params.get('spice_port', 'not_set')
    spice_tlsPort = params.get('spice_tlsPort', 'not_set')
    if not libvirt_version.version_compare(2, 0, 0):
        if spice_listen_type == 'none':
            params['spice_listen_type'] = 'not_set'
        if vnc_listen_type == 'none':
            params['vnc_listen_type'] = 'not_set'
    else:
        try:
            if int(spice_port) < -1:
                params["negative_test"] = "no"
        except ValueError:
            pass

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    spice_xml = params.get("spice_xml", "no") == 'yes'
    vnc_xml = params.get("vnc_xml", "no") == 'yes'
    is_negative = params.get("negative_test", "no") == 'yes'
    graphic_passwd = params.get("graphic_passwd")
    remote_viewer_check = params.get("remote_viewer_check", "no") == 'yes'
    valid_time = params.get("valid_time")

    sockets = block_ports(params)
    networks = setup_networks(params, test)

    expected_result = get_expected_results(params, networks)
    env_state = EnvState(params, expected_result)

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    if graphic_passwd:
        spice_passwd_place = params.get("spice_passwd_place", "not_set")
        vnc_passwd_place = params.get("vnc_passwd_place", "not_set")
        if spice_passwd_place == "qemu":
            config['spice_password'] = '"%s"' % graphic_passwd
            libvirtd.restart()
        if vnc_passwd_place == "qemu":
            config['vnc_password'] = '"%s"' % graphic_passwd
            libvirtd.restart()
    try:
        vm_xml.remove_all_graphics()
        if spice_xml:
            spice_graphic = generate_spice_graphic_xml(params, expected_result)
            logging.debug('Test SPICE XML is: %s', spice_graphic)
            vm_xml.devices = vm_xml.devices.append(spice_graphic)
        if vnc_xml:
            vnc_graphic = generate_vnc_graphic_xml(params, expected_result)
            logging.debug('Test VNC XML is: %s', vnc_graphic)
            vm_xml.devices = vm_xml.devices.append(vnc_graphic)
        vm_xml.sync()
        all_ips = utils_net.get_all_ips()

        fail_patts = expected_result['fail_patts']
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not fail_patts:
                test.fail("Expect VM can be started, but failed with: %s" %
                          detail)
            for patt in fail_patts:
                if re.search(patt, str(detail)):
                    return
            test.fail("Expect fail with error in %s, but failed with: %s" %
                      (fail_patts, detail))
        else:
            if fail_patts:
                test.fail("Expect VM can't be started with %s, but started." %
                          fail_patts)

        if spice_xml:
            spice_opts, plaintext_channels, tls_channels = qemu_spice_options(
                vm)
            check_spice_result(spice_opts, plaintext_channels, tls_channels,
                               expected_result, all_ips, test)
            # Use remote-viewer to connect guest
            if remote_viewer_check:
                rv_log_str = params.get("rv_log_str")
                rv_log_auth = params.get("rv_log_auth")
                opt_str = params.get('opt_str')
                check_connection('spice', expected_result['spice_port'],
                                 graphic_passwd, test, rv_log_str, rv_log_auth,
                                 opt_str, valid_time)

        if vnc_xml:
            vnc_opts = qemu_vnc_options(vm)
            check_vnc_result(vnc_opts, expected_result, all_ips, test)
            # Use remote-viewer to connect guest
            if remote_viewer_check:
                rv_log_str = params.get("rv_log_str")
                rv_log_auth = params.get("rv_log_auth")
                opt_str = params.get('opt_str')
                check_connection('vnc', expected_result['vnc_port'],
                                 graphic_passwd, test, rv_log_str, rv_log_auth,
                                 opt_str, valid_time)

        if is_negative:
            test.fail("Expect negative result. But start succeed!")
    except exceptions.TestFail as detail:
        bug_url = params.get('bug_url', None)
        if bug_url:
            logging.error("You probably encountered a known bug. "
                          "Please check %s for reference" % bug_url)
        raise
    finally:
        for sock in sockets:
            sock.close()
        if networks:
            for network in list(networks.values()):
                network.cleanup()
        vm_xml_backup.sync()
        os.system('rm -f /dev/shm/spice*')
        env_state.restore()
        config.restore()
        libvirtd.restart()
Example 2
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting numatune memnode is not "
                                    "supported on current libvirt "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
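        # size_dict maps a hugepage size in kB to the suffix appended to
        # /dev/hugepages for its mount point (e.g. '2048' -> /dev/hugepages2M)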
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError(
                    "Hugepage size [%s] isn't supported, "
                    "please verify kernel cmdline configuration." %
                    page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    raise error.TestNAError("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set total 2M size huge page number as total 1G size runtime
            # update not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # set hugepages per node if the current value is not enough
                # kernel 1G hugepage runtime number update is supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += r"N(\d+)=(\d+)"
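                # node_pattern matches numa_maps entries such as
                # "... bind:0 file=...ram-node0... N0=512 ...", capturing the
                # memory policy, its nodeset, the guest cell id, the host node
                # number and the number of pages mapped from that node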
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("hugepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
Example 3
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    shutdown_timeout = int(params.get('shutdown_timeout', 60))

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param option: managedsave command option.
        :param libvirtd: handle of the libvirtd service.
        :param check_shutdown: whether to also verify guest shutdown/restart.
        """
        # By now managedsave should have shut the VM down
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")

        virsh.start(vm_name, debug=True)
        # The VM should now be running
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initial guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain,
            # it should be in running state and can be login.
            vm.shutdown()
            if not vm.wait_for_shutdown(shutdown_timeout):
                test.fail('VM failed to shutdown')
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if vm can be undefined with manage-save option
        """
        # Backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        # Undefine domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined "
                      "while domain managed save image exists")
        # Undefine domain with managed-save option.
        if virsh.undefine(vm_name,
                          options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefined with managed-save option")

        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists after undefining vm")
        # Restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands in parallel and check the fd flags in the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd,
                          ignore_status=True,
                          shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
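        # fdinfo reports the flags field in octal, so convert with base 8 and
        # mask with the expected flag bits (O_DIRECT in this test) to check
        # whether bypass-cache took effect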
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name,
                                            dst_vm,
                                            True,
                                            timeout=timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest .*?[\n]*.*done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'",
                              shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a short wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [
            time.mktime(time.strptime(tm, "%b %y %H:%M:%S"))
            for tm in resume_time
        ]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay take effect
        for i in range(len(resume_seconds) - 1):
            if resume_seconds[i + 1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait for vm state is ready.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # Form the proper parallel command based on whether systemd is used
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(
            virsh_cmd_stop,
            bash_cmd % (managed_save_file, managed_save_file, "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all([
                "Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text
        ]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            test.fail("The exit code %s for libvirt-guests"
                      " status is not correct" % ret.exit_status)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(
            virsh_cmd_start,
            bash_cmd % (managed_save_file, managed_save_file, "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file) and case not in [
                'not_saved_without_file', 'saved_without_file'
        ]:
            test.fail("Can't find managed save image")
        ret = virsh.managedsave_remove(vm_name, debug=True)
        libvirt.check_exit_status(ret, msave_rm_error)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be running after being started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {
                    "type": "dynamic",
                    "model": "selinux",
                    "relabel": "yes"
                }
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    remove_test = 'yes' == params.get('remove_test', 'no')
    case = params.get('case', '')
    msave_rm_error = "yes" == params.get("msave_rm_error", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run(
                "sed -i -e 's/ = /=/g' "
                "/etc/sysconfig/libvirt-guests",
                shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Can't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For bypass_cache test. Run a shell command to check fd flags while
        # executing managedsave command
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
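        # The shell loop below polls (up to 400 times, every 0.05s) for the
        # managed save file to appear, then dumps the fd flags of the libvirt
        # iohelper process that lsof reports as holding the file open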
        bash_cmd = (
            "let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
            "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/%s |"
            "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(
                virsh_cmd,
                bash_cmd % (managed_save_file, managed_save_file, "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(
                virsh_cmd,
                bash_cmd % (managed_save_file, managed_save_file, "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests, start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)
        elif remove_test:
            if case == 'not_saved_with_file':
                process.run('touch %s' % managed_save_file,
                            shell=True,
                            verbose=True)
            vm_msave_remove_check(vm_name)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref,
                                    options=option,
                                    ignore_status=True,
                                    debug=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)
            if case == 'saved_without_file':
                os.remove(managed_save_file)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    if libvirtd_state == "off" and libvirt_version.version_compare(
                            5, 6, 0):
                        logging.info(
                            "From libvirt version 5.6.0 libvirtd is restarted "
                            "and command should succeed")
                    else:
                        test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd or case == 'saved_without_file':
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    # rhbz#1755303
                    if libvirt_version.version_compare(5, 6, 0):
                        os.remove("/run/libvirt/qemu/autostarted")
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(
                        virsh_cmd,
                        bash_cmd % (managed_save_file, managed_save_file, "0"),
                        flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name,
                            "--disable",
                            ignore_status=True,
                            debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
Example 4
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    check = params.get('check', '')
    qemu_checks = params.get('qemu_checks', '')

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=4000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages to set
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def create_mbxml():
        """
        Create memoryBacking xml for test
        """
        mb_params = {k: v for k, v in params.items() if k.startswith('mbxml_')}
        logging.debug(mb_params)
        mb_xml = vm_xml.VMMemBackingXML()
        mb_xml.xml = "<memoryBacking></memoryBacking>"
        for attr_key in mb_params:
            val = mb_params[attr_key]
            logging.debug('Set mb params')
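            # Values containing ':' are assumed to be Python literals
            # (e.g. dicts) and are evaluated before being set on the XML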
            setattr(mb_xml, attr_key.replace('mbxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(mb_xml)
        return mb_xml.copy()

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {
            k: v
            for k, v in params.items() if k.startswith('cpuxml_')
        }
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_dimm_xml(**mem_param):
        """
        Create xml of dimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            pg_size=int(mem_param['source_pagesize']),
            tg_size=mem_param['target_size'],
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_model="dimm")
        logging.debug(mem_xml)
        return mem_xml.copy()

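    # "huge_pages" is expected to hold whitespace-separated dict literals,
    # e.g. {'size':'2048','unit':'KiB','nodeset':'0'} (assumed keys)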
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]

    config = utils_config.LibvirtQemuConfig()
    page_size = params.get("page_size")
    discard = params.get("discard")
    setup_hugepages(int(page_size))

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Set memoryBacking according to params
        mb_xml = create_mbxml()
        vmxml.mb = mb_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {
            k: params[k]
            for k in params if k.startswith('setvm_')
        }
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking
            logging.debug(virsh.dumpxml(vm_name))

        if check == "mem_dev" or check == "hot_plug":
            # Add dimm mem device to vm xml
            dimm_params = {
                k.replace('dimmxml_', ''): v
                for k, v in params.items() if k.startswith('dimmxml_')
            }
            dimm_xml = create_dimm_xml(**dimm_params)
            if params.get('dimmxml_mem_access'):
                dimm_xml.mem_access = dimm_params['mem_access']
            vmxml.add_device(dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        test_vm = env.get_vm(vm_name)
        vmxml.sync()
        if test_vm.is_alive():
            test_vm.destroy()

        virsh.start(vm_name, debug=True, ignore_status=False)
        test_vm.wait_for_login()

        if check == 'numa_cell' or check == 'mem_dev':
            # Check qemu command line one by one
            logging.debug("enter check")
            if discard == 'yes':
                libvirt.check_qemu_cmd_line(qemu_checks)
            elif libvirt.check_qemu_cmd_line(qemu_checks, True):
                test.fail("The unexpected [%s] exist in qemu cmd" %
                          qemu_checks)

        if check == 'hot_plug':
            # Add dimm device to vm xml
            dimm_params2 = {
                k.replace('dimmxml2_', ''): v
                for k, v in params.items() if k.startswith('dimmxml2_')
            }
            dimm_xml2 = create_dimm_xml(**dimm_params2)
            if params.get('dimmxml2_mem_access'):
                dimm_xml2.mem_access = dimm_params2['mem_access']

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, dimm_xml2.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be a memory device added
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))

            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            dimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(dimm_detach)

            # Hot-unplug dimm device
            result = virsh.detach_device(vm_name, dimm_detach.xml, debug=True)
            libvirt.check_exit_status(result)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                time.sleep(60)
                test.fail(
                    'Number of memory devices after detach is %d, should be %d'
                    % (len(left_devices), len(ori_devices)))

    except virt_vm.VMStartError as e:
        test.fail("VM failed to start. Error: %s" % str(e))
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
Example 5
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if needed.
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    xattr_check = 'yes' == params.get("xattr_check", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        stat_re = os.stat(disk_path)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
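        """
        Split a SELinux context 'user:role:type[:sensitivity[:categories]]'
        and return its (type, range) parts.
        """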
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
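        """
        Compare two SELinux contexts field by field, but only over the
        fields present in label1, so an extra MCS category appended to
        label2 is ignored.
        """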
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in list(disks.values()):
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=disk_path,
                                        readonly=False,
                                        forcedesc=True)
            if sec_relabel == "no" and sec_type == 'none':
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            # Need another guest to test the xattr added by libvirt
            if xattr_check:
                blklist = virsh.domblklist(vm_name, debug=True)
                target_disk = re.findall(r"[vs]d[a-z]",
                                         blklist.stdout.strip())[0]
                guest_name = "%s_%s" % (vm_name, '1')
                cmd = "virt-clone --original %s --name %s " % (vm_name,
                                                               guest_name)
                cmd += "--auto-clone --skip-copy=%s" % target_disk
                process.run(cmd, shell=True, verbose=True)
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")
            # Start another vm with the same disk image.
            # The xattr will not be changed.
            if xattr_check:
                virsh.start(guest_name, ignore_status=True, debug=True)
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                test.fail("Label of VM is not expected after "
                          "starting.\n"
                          "Detail: vm_context=%s, sec_label=%s" %
                          (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                test.fail("Label of disk is not expected after VM "
                          "starting.\n"
                          "Detail: disk_context=%s, img_label=%s." %
                          (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # When the VM starts, libvirt appends the MLS/MCS range
                # automatically: the disk context stays
                # 'system_u:object_r:svirt_image_t:s0', while the imagelabel
                # becomes 'system_u:object_r:svirt_image_t:s0:cxx,cxxx',
                # so the MCS range must be ignored in the comparison.
                if not _check_label_equal(disk_context, imagelabel):
                    test.fail("Label of disk is not relabeled by "
                              "VM.\nDetail: disk_context="
                              "%s, imagelabel=%s" % (disk_context, imagelabel))
                expected_results = "trusted.libvirt.security.ref_dac=\"1\"\n"
                expected_results += "trusted.libvirt.security.ref_selinux=\"1\""
                cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                    disks.values())[0]['source']
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            filename = list(disks.values())[0]['source']
            img_label_after = utils_selinux.get_context_of_file(filename)
            stat_re = os.stat(filename)
            ownership_of_disk = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            logging.debug("The ownership of disk after guest starting is:\n")
            logging.debug(ownership_of_disk)
            logging.debug("The ownership of disk before guest starting is:\n")
            logging.debug(backup_ownership_of_disks[filename])
            if not (sec_relabel == "no" and sec_type == 'none'):
                if not libvirt_version.version_compare(5, 6, 0):
                    if img_label_after != img_label:
                        # Bug 547546 - RFE: the security drivers must remember original
                        # permissions/labels and restore them after
                        # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                        err_msg = "Label of disk is not restored in VM shutting down.\n"
                        err_msg += "Detail: img_label_after=%s, " % img_label_after
                        err_msg += "img_label_before=%s.\n" % img_label
                        err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                        err_msg += ".cgi?id=547546"
                        test.fail(err_msg)
                elif (img_label_after != img_label or ownership_of_disk !=
                      backup_ownership_of_disks[filename]):
                    err_msg = "Label of disk is not restored in VM shutting down.\n"
                    err_msg += "Detail: img_label_after=%s, %s " % (
                        img_label_after, ownership_of_disk)
                    err_msg += "img_label_before=%s, %s\n" % (
                        img_label, backup_ownership_of_disks[filename])
                    test.fail(err_msg)
            # The xattr should be cleaned after guest shutoff.
            cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                disks.values())[0]['source']
            utils_test.libvirt.check_cmd_output(cmd, content="")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in list(backup_ownership_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        if xattr_check:
            virsh.undefine(guest_name, ignore_status=True)
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
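
Note: the helpers _resolve_label() and _check_label_equal() called in the example above are defined elsewhere in the test module and are not part of this excerpt. Based only on how they are used here (splitting an SELinux context of the form user:role:type:range into its type and range, and comparing two contexts while ignoring the MLS/MCS range), a minimal sketch could look like the following; the real implementations may differ.

def _resolve_label(label):
    """Sketch: split 'user:role:type[:range]' into (type, range)."""
    fields = label.split(":")
    label_type = fields[2]
    label_range = ":".join(fields[3:]) if len(fields) > 3 else None
    return label_type, label_range


def _check_label_equal(label_a, label_b):
    """Sketch: compare two SELinux contexts, ignoring the MLS/MCS range."""
    return label_a.split(":")[:3] == label_b.split(":")[:3]
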
Esempio n. 6
0
def run(test, params, env):
    """
    Test the stdio_handler parameter in qemu.conf used for handling stdout/stderr
    output from QEMU processes.

    1) Change stdio_handler in qemu.conf;
    2) Restart libvirtd daemon;
    3) Check if libvirtd successfully started;
    4) Check if virtlogd.socket is running;
    5) Configure pty serial and console;
    6) Check if VM log file exists and has correct permission and owner;
    7) Check if VM log file is opened by virtlogd;
    8) Check if VM start log is written into VM log file correctly;
    9) Check if QEMU uses the pipe provided by the virtlogd daemon for logging;
    10) Check if VM shutdown log is written into VM log file correctly;
    11) Check if pipe file can be closed gracefully after VM shutdown;
    12) Check if VM restart log can be appended to the end of previous log file;
    """
    def clean_up_vm_log_file(vm_name):
        """Clean up VM log file."""
        # Delete VM log file if exists.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        if os.path.exists(guest_log_file):
            os.remove(guest_log_file)

    def configure(cmd, guest_log_file=None, errorMsg=None):
        """
        Configure qemu log.
        :param cmd: execute command string.
        :param guest_log_file: the path of VM log file.
        :param errorMsg: error message if failed
        :return: pipe node.
        """
        # If guest_log_file is not None, check whether the VM log file exists.
        if guest_log_file and not os.path.exists(guest_log_file):
            test.error("Expected VM log file %s does not exist" % guest_log_file)
        # If errorMsg is not None, check command running result.
        elif errorMsg:
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.error(errorMsg)
        # Get pipe node.
        else:
            result = process.run(cmd,
                                 timeout=90,
                                 ignore_status=True,
                                 shell=True)
            ret, output = result.exit_status, result.stdout_text
            if ret:
                test.fail("Failed to get pipe node")
            else:
                return output

    def configure_serial_console(vm_name):
        """Configure serial console"""
        # Check the primary serial and set it to pty.
        VMXML.set_primary_serial(vm_name, 'pty', '0', None)
        # Configure VM pty console.
        vm_pty_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        vm_pty_xml.remove_all_device_by_type('console')

        console = Console()
        console.target_port = '0'
        console.target_type = 'serial'
        vm_pty_xml.add_device(console)
        vm_pty_xml.sync()

    def check_vm_log_file_permission_and_owner(vm_name):
        """Check VM log file permission and owner."""
        # Check VM log file permission.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        logging.info("guest log file: %s", guest_log_file)
        if not os.path.exists(guest_log_file):
            test.error("Expected VM log file: %s not exists" % guest_log_file)
        permission = oct(stat.S_IMODE(os.lstat(guest_log_file).st_mode))
        if permission != '0600' and permission != '0o600':
            test.fail(
                "VM log file: %s expected permission 0600, got %s." %
                (guest_log_file, permission))
        # Check VM log file owner.
        owner = getpwuid(os.lstat(guest_log_file).st_uid).pw_name
        if owner != 'root':
            test.fail("VM log file: %s expected owner root, got %s." %
                      (guest_log_file, owner))

    def check_info_in_vm_log_file(vm_name, cmd=None, matchedMsg=None):
        """
        Check if log information is written into log file correctly.
        """
        # Check VM log file is opened by virtlogd.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        if not os.path.exists(guest_log_file):
            test.fail("Expected VM log file: %s not exists" % guest_log_file)

        if cmd is None:
            cmd = ("grep -nr '%s' %s" % (matchedMsg, guest_log_file))
        else:
            cmd = (cmd + " %s |grep '%s'" % (guest_log_file, matchedMsg))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Failed to get VM started log from VM log file: %s." %
                      guest_log_file)

    def check_pipe_closed(pipe_node):
        """
        Check pipe used by QEMU is closed gracefully after VM shutdown.
        """
        # Check pipe node can not be listed after VM shutdown.
        cmd = ("lsof  -w |grep pipe|grep virtlogd|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in virtlogd gracefully." %
                      pipe_node)

        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in qemu gracefully." %
                      pipe_node)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "virtlogd_disabled")
    stdio_handler = params.get("stdio_handler", "not_set")
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if stdio_handler != 'not_set':
            config['stdio_handler'] = "'%s'" % stdio_handler
        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with stdio_handler=%s' % stdio_handler)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with stdio_handler=%s' % stdio_handler)

        # Stop all VMs if VMs are already started.
        for tmp_vm in env.get_all_vms():
            if tmp_vm.is_alive():
                tmp_vm.destroy(gracefully=False)

        # Remove VM previous log file.
        clean_up_vm_log_file(vm_name)

        # Check if virtlogd socket is running.
        cmd = ("systemctl status virtlogd.socket|grep 'Active: active'")
        configure(cmd, errorMsg="virtlogd.socket is not running")

        # Configure serial console.
        configure_serial_console(vm_name)

        logging.info("final vm:")
        logging.info(VMXML.new_from_inactive_dumpxml(vm_name))

        # Start VM.
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("VM failed to start." "Error: %s" % str(detail))
        # Check VM log file has right permission and owner.
        check_vm_log_file_permission_and_owner(vm_name)
        utils_package.package_install(['lsof'])
        # Check VM log file is opened by virtlogd.
        cmd = ("lsof -w %s|grep 'virtlogd'" % guest_log_file)
        errorMessage = "VM log file: %s is not opened by:virtlogd." % guest_log_file
        configure(cmd, guest_log_file, errorMessage)

        # Check VM started log is written into log file correctly.
        check_info_in_vm_log_file(
            vm_name, matchedMsg="char device redirected to /dev/pts")

        # Get pipe node opened by virtlogd for VM log file.
        cmd = ("lsof  -w |grep pipe|grep virtlogd|tail -n 1|awk '{print $9}'")
        pipe_node = configure(cmd)

        # Check if qemu-kvm use pipe node provided by virtlogd.
        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        errorMessage = ("Can not find matched pipe node: %s "
                        "from pipe list used by qemu-kvm." % pipe_node)
        configure(cmd, errorMsg=errorMessage)

        # Shutdown VM.
        if not vm.shutdown():
            vm.destroy(gracefully=True)

        # Check VM shutdown log is written into log file correctly.
        check_info_in_vm_log_file(vm_name, matchedMsg="shutting down")

        # Check pipe is closed gracefully after VM shutdown.
        check_pipe_closed(pipe_node)

        # Start VM again.
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("VM failed to start." "Error: %s" % str(detail))
        # Check the new VM start log is appended to the end of the VM log file.
        check_info_in_vm_log_file(
            vm_name,
            cmd="tail -n 5",
            matchedMsg="char device redirected to /dev/pts")

    finally:
        config.restore()
        libvirtd.restart()
        vm_xml_backup.sync()
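
The virtlogd examples above and below reference a module-level QEMU_LOG_PATH constant that is not shown in this excerpt. On a default libvirt installation the per-guest QEMU log directory is /var/log/libvirt/qemu, so a plausible definition (an assumption, not taken from the original module) would be:

import os

# Assumed default QEMU log directory used by libvirt/virtlogd.
QEMU_LOG_PATH = "/var/log/libvirt/qemu"

# The tests derive the per-guest log file from it, for example:
# guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
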
Esempio n. 7
0
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            test.cancel("%s value '%s' is not a number." % (i, params.get(i)))
    # False positive - img_val was filled in the for loop above.
    # pylint: disable=E0632
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vm_os_xml = vmxml.os

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        label = check_ownership(disk_path)
        if label:
            backup_labels_of_disks[disk_path] = label

    try:
        if vm_os_xml.nvram:
            nvram_path = vm_os_xml.nvram
            if not os.path.exists(nvram_path):
                # Need libvirt automatically generate the path
                vm.start()
                vm.destroy(gracefully=False)
            label = check_ownership(nvram_path)
            if label:
                backup_labels_of_disks[nvram_path] = label
    except xcepts.LibvirtXMLNotFoundError:
        logging.debug("vm xml don't have nvram element")

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk to qemu:qemu to avoid fail on local disk
        for file_path in list(backup_labels_of_disks.keys()):
            if qemu_user == "root":
                os.chown(file_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(file_path, 107, 107)
            else:
                process.run('chown %s %s' % (qemu_user, file_path), shell=True)

        # Set selinux of host.
        if backup_sestatus == "disabled":
            test.cancel("SELinux is in Disabled mode."
                        "It must be Enabled to"
                        "run this test")
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s", qemu_conf)
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s", export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     emulated_image,
                     image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                             shell=True)
        if result.exit_status:
            test.cancel("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = test.tmpdir
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create a image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            test.cancel("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name,
                                   source=img_path,
                                   target="vdf",
                                   extra=extra,
                                   debug=True)
        if result.exit_status:
            test.fail("Failed to attach disk %s to VM."
                      "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug(
                "attached image ownership on nfs server before "
                "start: %s", img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug(
                    "attached image ownership on nfs server after"
                    " start: %s", img_label_after)

            if status_error:
                test.fail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    test.fail("Failed to create snapshot. Error:%s." %
                              snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in list(disks_snap.values()):
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name,
                                  snapshot_name,
                                  "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name,
                              target="vdf",
                              extra="--persistent",
                              debug=True)
        except process.CmdError:
            test.fail("Detach disk 'vdf' from VM %s failed." % vm.name)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        if snapshot_name:
            backup_xml.sync("--snapshots-metadata")
        else:
            backup_xml.sync()
        for i in disk_snap_path:
            if i and os.path.exists(i):
                os.unlink(i)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except Exception as detail:
                logging.error(str(detail))
        utils_selinux.set_status(backup_sestatus)
        libvirtd.restart()
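
The check_ownership() helper used in the example above is not included in this excerpt. From the way its return value is consumed (truthiness check, then label.split(":") into a uid and gid passed to os.chown), a minimal sketch would be:

import os


def check_ownership(file_path):
    """Sketch: return file ownership as a 'uid:gid' string, or None if missing."""
    if not os.path.exists(file_path):
        return None
    stat_re = os.stat(file_path)
    return "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
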
Esempio n. 8
0
def run(test, params, env):
    """
    Test the stdio_handler parameter in qemu.conf used for handling stdout/stderr
    output from QEMU processes.

    1) Change stdio_handler in qemu.conf;
    2) Restart libvirtd daemon;
    3) Check if libvirtd successfully started;
    4) Check if virtlogd.socket is running;
    5) Configure pty serial and console;
    6) Check if VM log file exists and has correct permission and owner;
    7) Check if VM log file is opened by virtlogd;
    8) Check if VM start log is written into VM log file correctly;
    9) Check if QEMU uses the pipe provided by the virtlogd daemon for logging;
    10) Check if VM shutdown log is written into VM log file correctly;
    11) Check if pipe file can be closed gracefully after VM shutdown;
    12) Check if VM restart log can be appended to the end of previous log file;
    """
    def clean_up_vm_log_file(vm_name, guest_log_file):
        """
        Clean up VM log file.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        """
        # Delete VM log file if exists.
        if os.path.exists(guest_log_file):
            os.remove(guest_log_file)

    def configure(cmd, guest_log_file=None, errorMsg=None):
        """
        Configure qemu log.

        :param cmd: execute command string.
        :param guest_log_file: the path of VM log file.
        :param errorMsg: error message if failed
        :return: pipe node.
        """
        # If guest_log_file is not None, check whether the VM log file exists.
        if guest_log_file and not os.path.exists(guest_log_file):
            test.error("Expected VM log file %s does not exist" % guest_log_file)
        # If errorMsg is not None, check command running result.
        elif errorMsg:
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.error(errorMsg)
        # Get pipe node.
        else:
            result = process.run(cmd,
                                 timeout=90,
                                 ignore_status=True,
                                 shell=True)
            ret, output = result.exit_status, result.stdout_text
            if ret:
                test.fail("Failed to get pipe node")
            else:
                return output

    def configure_serial_console(vm_name, dev_type, guest_log_file=None):
        """
        Configure serial console.

        :params vm_name: guest name
        :params dev_type: device type
        :params guest_log_file: the path of VM log file
        """
        guest_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        guest_xml.remove_all_device_by_type('serial')
        guest_xml.remove_all_device_by_type('console')

        serial = Serial(dev_type)
        serial.target_port = '0'

        console = Console(dev_type)
        console.target_port = '0'
        console.target_type = 'serial'

        if dev_type == "file" and guest_log_file is not None:
            serial.sources = console.sources = [{
                'path': guest_log_file,
                'append': 'off'
            }]
        guest_xml.add_device(serial)
        guest_xml.add_device(console)
        guest_xml.sync()

    def check_vm_log_file_permission_and_owner(vm_name, guest_log_file):
        """
        Check VM log file permission and owner.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        """
        # Check VM log file permission.
        logging.info("guest log file: %s", guest_log_file)
        if not os.path.exists(guest_log_file):
            test.error("Expected VM log file: %s not exists" % guest_log_file)
        permission = oct(stat.S_IMODE(os.lstat(guest_log_file).st_mode))
        if permission != '0600' and permission != '0o600':
            test.fail(
                "VM log file: %s expected permission 0600, got %s." %
                (guest_log_file, permission))
        # Check VM log file owner.
        owner = getpwuid(os.lstat(guest_log_file).st_uid).pw_name
        if owner != 'root':
            test.fail("VM log file: %s expected owner root, got %s." %
                      (guest_log_file, owner))

    def check_info_in_vm_log_file(vm_name,
                                  guest_log_file,
                                  cmd=None,
                                  matchedMsg=None):
        """
        Check if log information is written into log file correctly.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        :params cmd: execute command string
        :params matchedMsg: match message
        """
        # Check VM log file is opened by virtlogd.
        if not os.path.exists(guest_log_file):
            test.fail("Expected VM log file: %s not exists" % guest_log_file)

        if cmd is None:
            cmd = ("grep -nr '%s' %s" % (matchedMsg, guest_log_file))
        else:
            cmd = (cmd + " %s |grep '%s'" % (guest_log_file, matchedMsg))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Failed to get VM started log from VM log file: %s." %
                      guest_log_file)

    def check_pipe_closed(pipe_node):
        """
        Check pipe used by QEMU is closed gracefully after VM shutdown.
        """
        # Check pipe node can not be listed after VM shutdown.
        cmd = ("lsof  -w |grep pipe|grep virtlogd|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in virtlogd gracefully." %
                      pipe_node)

        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in qemu gracefully." %
                      pipe_node)

    def check_service_status(service_name, service_start=False):
        """
        Check service status and return service PID

        :param service_name: service name
        :param service_start: whether to start service or not
        :return: service PID
        """
        # Check service status
        cmd = ("systemctl status %s | grep 'Active: active'" % service_name)
        ret = process.run(cmd, ignore_status=True, shell=True, verbose=True)
        if ret.exit_status:
            # If the service isn't active and 'service_start' is set, start it.
            if service_start:
                ret = process.run("systemctl start %s" % service_name,
                                  shell=True)
                if ret.exit_status:
                    test.fail("%s start failed." % service_name)
            # If the service isn't active and 'service_start' is not set, fail.
            else:
                test.fail("%s is not active." % service_name)
        cmd = ("systemctl status %s | grep 'Main PID:'" % service_name)
        ret = process.run(cmd, ignore_status=True, shell=True, verbose=True)
        if ret.exit_status:
            test.fail("Get %s status failed." % service_name)
        return ret.stdout_text.split()[2]

    def reload_and_check_virtlogd():
        """
        Reload and check virtlogd status
        """
        virtlogd_pid = check_service_status("virtlogd", service_start=True)
        logging.info("virtlogd PID: %s", virtlogd_pid)
        ret = process.run("systemctl reload virtlogd", shell=True)
        if ret.exit_status:
            test.fail("virtlogd reload failed.")
        reload_virtlogd_pid = check_service_status("virtlogd",
                                                   service_start=True)
        logging.info("After reload, virtlogd PID: %s", reload_virtlogd_pid)
        if virtlogd_pid != reload_virtlogd_pid:
            test.fail("After reload, virtlogd PID changed.")

    def configure_spice(vm_name):
        """
        Configure spice

        :params vm_name: guest name
        """
        vm_spice_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        vm_spice_xml.remove_all_device_by_type('graphics')

        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        vm_spice_xml.add_device(graphic)
        vm_spice_xml.sync()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "virtlogd_disabled")
    stdio_handler = params.get("stdio_handler", "not_set")
    start_vm = "yes" == params.get("start_vm", "yes")
    reload_virtlogd = "yes" == params.get("reload_virtlogd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    with_spice = "yes" == params.get("with_spice", "no")
    with_console_log = "yes" == params.get("with_console_log", "no")

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if with_console_log:
        guest_log_file = os.path.join(QEMU_LOG_PATH,
                                      "%s-console.log" % vm_name)
    else:
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if stdio_handler != 'not_set':
            config['stdio_handler'] = "'%s'" % stdio_handler
        if restart_libvirtd:
            virtlogd_pid = check_service_status("virtlogd", service_start=True)
            logging.info("virtlogd pid: %s", virtlogd_pid)
            check_service_status("libvirtd", service_start=True)

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with stdio_handler=%s' % stdio_handler)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with stdio_handler=%s' % stdio_handler)

        if not start_vm:
            if reload_virtlogd:
                reload_and_check_virtlogd()
        else:
            # Stop all VMs if VMs are already started.
            for tmp_vm in env.get_all_vms():
                if tmp_vm.is_alive():
                    tmp_vm.destroy(gracefully=False)

            # Sleep a few seconds to let the VM sync underlying data
            time.sleep(3)

            # Remove VM previous log file.
            clean_up_vm_log_file(vm_name, guest_log_file)

            # Check if virtlogd socket is running.
            cmd = ("systemctl status virtlogd.socket|grep 'Active: active'")
            configure(cmd, errorMsg="virtlogd.socket is not running")

            # Configure spice
            if with_spice:
                configure_spice(vm_name)

            # Configure serial console.
            if with_console_log:
                configure_serial_console(vm_name, 'file', guest_log_file)
            else:
                configure_serial_console(vm_name, 'pty')

            logging.info("final vm:")
            logging.info(VMXML.new_from_inactive_dumpxml(vm_name))

            # Start VM.
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                test.fail("VM failed to start." "Error: %s" % str(detail))
            # Wait for write log to console log file
            if with_console_log:
                vm.wait_for_login()

            # Check VM log file has right permission and owner.
            check_vm_log_file_permission_and_owner(vm_name, guest_log_file)
            utils_package.package_install(['lsof'])
            # Check VM log file is opened by virtlogd.
            cmd = ("lsof -w %s|grep 'virtlogd'" % guest_log_file)
            errorMessage = "VM log file: %s is not opened by:virtlogd." % guest_log_file
            configure(cmd, guest_log_file, errorMessage)

            # Check VM started log is written into log file correctly.
            if not with_console_log:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    matchedMsg="char device redirected to /dev/pts")

            # Get pipe node opened by virtlogd for VM log file.
            pipe_node_field = "$9"
            # On newer releases, the pipe node number is in the 8th field of the lsof output.
            if libvirt_version.version_compare(4, 3, 0):
                pipe_node_field = "$8"
            cmd = (
                "lsof  -w |grep pipe|grep virtlogd|tail -n 1|awk '{print %s}'"
                % pipe_node_field)
            pipe_node = configure(cmd)

            if restart_libvirtd:
                libvirtd.restart()
                new_virtlogd_pid = check_service_status("virtlogd",
                                                        service_start=True)
                logging.info("After libvirtd restart, virtlogd PID: %s",
                             new_virtlogd_pid)
                new_pipe_node = configure(cmd)
                logging.info("After libvirtd restart, pipe node: %s",
                             new_pipe_node)
                if pipe_node != new_pipe_node and new_pipe_node != new_virtlogd_pid:
                    test.fail("After libvirtd restart, pipe node changed.")

            if with_spice or with_console_log:
                reload_and_check_virtlogd()

            # Check if qemu-kvm use pipe node provided by virtlogd.
            cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
            errorMessage = ("Can not find matched pipe node: %s "
                            "from pipe list used by qemu-kvm." % pipe_node)
            configure(cmd, errorMsg=errorMessage)

            # Shutdown VM.
            if not vm.shutdown():
                vm.destroy(gracefully=True)

            # Check qemu log works well
            if with_spice:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    matchedMsg="qemu-kvm: terminating on signal 15 from pid")

            # Check VM shutdown log is written into log file correctly.
            if with_console_log:
                check_info_in_vm_log_file(vm_name,
                                          guest_log_file,
                                          matchedMsg="Powering off")
            else:
                check_info_in_vm_log_file(vm_name,
                                          guest_log_file,
                                          matchedMsg="shutting down")

            # Check pipe is closed gracefully after VM shutdown.
            check_pipe_closed(pipe_node)

            # Start VM again.
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                test.fail("VM failed to start." "Error: %s" % str(detail))
            # Check the new VM start log is appended to the end of the VM log file.
            if not with_console_log:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    cmd="tail -n 5",
                    matchedMsg="char device redirected to /dev/pts")

    finally:
        config.restore()
        libvirtd.restart()
        vm_xml_backup.sync()
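
A side note on check_service_status() above: it derives the service PID by grepping the human-readable 'systemctl status' output, which is layout dependent. A more direct alternative (not what the test does, just a sketch assuming a systemd version that supports --value) is to query the MainPID property:

from avocado.utils import process


def get_service_main_pid(service_name):
    """Sketch: read a service's MainPID via 'systemctl show' instead of grep."""
    result = process.run("systemctl show -p MainPID --value %s" % service_name,
                         shell=True, ignore_status=True)
    return result.stdout_text.strip()
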
Esempio n. 9
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if needed
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []
    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                sec_dict['model'] = model
                if sec_type != "none":
                    sec_dict['label'] = sec_label
                sec_dict_copy = sec_dict.copy()
                sec_dict_list.append(sec_dict_copy)
        else:
            sec_dict['model'] = sec_model
            if sec_type != "none":
                sec_dict['label'] = sec_label
            sec_dict_list.append(sec_dict)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Set disk label
        for disk in disks.values():
            disk_path = disk['source']
            utils_selinux.set_context_of_file(filename=disk_path,
                                              context=img_label)
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s" %
                                     (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s." %
                                     (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetal: disk_context="
                                         "%s, imagelabel=%s" %
                                         (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
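
The utils_selinux.get_context_of_file()/set_context_of_file() calls used throughout these examples come from avocado-vt's virttest utilities. Conceptually they read and write the file's 'security.selinux' extended attribute; a rough standard-library equivalent for the read side (illustrative only, Linux-specific) is:

import os


def get_context_of_file(filename):
    """Sketch: read the SELinux context from the security.selinux xattr."""
    # The attribute value is bytes and may carry a trailing NUL byte.
    return os.getxattr(filename, "security.selinux").decode().rstrip("\x00")
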
Esempio n. 10
0
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(test.tmpdir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or is invalid in qemu.conf, the
        file should be a plain raw file; otherwise it should be compressed into
        the specified format. The supported compression formats are: lzop, gzip,
        bzip2, and xz.
        For a memory-only dump, the default dump format is ELF; the format can
        also be set with the --format option, so the result may be 'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if not dump_image_format or dump_image_format not in valid_format:
            logging.debug("No need to check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            (status, output) = process.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with bypass-cache option
    child_pid = 0
    if options.find('bypass-cache') >= 0:
        pid = os.fork()
        if pid:
            # Guarantee check_bypass function has run before dump
            child_pid = pid
            try:
                wait_pid_active(pid, timeout)
            finally:
                os.kill(child_pid, signal.SIGUSR1)
        else:
            check_bypass(dump_file)
            # Wait for the parent process to finish
            while True:
                time.sleep(1)

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            raise error.TestNAError("Current libvirt version doesn't support"
                                    " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            raise error.TestNAError("Unsupported dump format '%s' for"
                                    " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        raise error.TestError("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = utils.run(cmd, ignore_status=True)
            logging.debug("cmdline: %s" % result.stdout)
            if result.exit_status:
                error.TestFail("Not find dump-guest-core=%s in qemu cmdline" %
                               dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Run virsh command
        cmd_result = virsh.dump(vm_name,
                                dump_file,
                                options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True,
                                debug=True)
        status = cmd_result.exit_status

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            raise error.TestFail("Domain status check fail.")
        if status_error:
            if not status:
                raise error.TestFail("Expect fail, but run successfully")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                raise error.TestFail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                raise error.TestFail("The format of dumped file is wrong.")
    finally:
        if child_pid:
            os.kill(child_pid, signal.SIGUSR1)
        if os.path.isfile(dump_file):
            os.remove(dump_file)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
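
The check_bypass() and wait_pid_active() helpers used by the --bypass-cache branch above are not part of this excerpt. The core of the bypass check is verifying that the dump file descriptor was opened with O_DIRECT so the host page cache is skipped; a standalone sketch of that idea (hypothetical helper, not the test's actual code) is:

O_DIRECT = 0o40000  # O_DIRECT flag value on Linux x86/x86_64


def fd_uses_direct_io(pid, fd):
    """Sketch: check whether a process file descriptor was opened with O_DIRECT."""
    with open("/proc/%d/fdinfo/%d" % (pid, fd)) as fdinfo:
        for line in fdinfo:
            if line.startswith("flags:"):
                # The flags field in fdinfo is printed in octal.
                return bool(int(line.split()[1], 8) & O_DIRECT)
    return False
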
Esempio n. 11
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.If libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()
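        # The resulting libvirtd.conf entries look roughly like:
        #   log_level = 1
        #   log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"
        #   log_outputs = "1:file:<tmpdir>/libvirtd.log"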

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(test.tmpdir, "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Reduce the guest memory so that dumping the core does not take so
        # long that it times out the test case
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash the VM. The command will not
                # return because the VM crashes, so use a short timeout for
                # the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions need more
                # time to dump the core file or restart the OS, so use the
                # default session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref,
                                    extra,
                                    ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the VM now; otherwise destroying a suspended
                    # VM later takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # With on_crash='preserve' the VM stays in the crashed
                    # state; perform a virsh reset and check that domstate
                    # moves from crashed back to running (a bug was observed
                    # here)
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        # dump_file is a glob pattern; verify that at least
                        # one matching core dump file was created
                        if process.run("ls %s" % dump_file, shell=True,
                                       ignore_status=True).exit_status:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected libvirtd log file %s does "
                                      "not exist" % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        # grep returns 0 when the message is found, which is
                        # unexpected here
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Found unexpected error message %s in log "
                                "file: %s." % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
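Esempio n. 11 builds dump_file as a glob pattern and relies on libvirt's auto_dump_path to write the core dump after the guest crash. A small, hypothetical helper (not part of the original test) for waiting until such a dump appears could look like this:

import glob
import time


def wait_for_core_dump(dump_file_pattern, timeout=120, step=5):
    """Poll until a core dump matching the glob pattern exists, or time out."""
    end_time = time.time() + timeout
    while time.time() < end_time:
        matches = glob.glob(dump_file_pattern)
        if matches:
            return matches[0]
        time.sleep(step)
    return None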
Esempio n. 12
0
def run(test, params, env):
    """
    Test memory device (dimm) attach/detach and memory hotplug.

    1. Prepare test environment, destroy or suspend a VM.
    2. Modify the domain XML (maxMemory, NUMA cells, hugepages) as required.
    3. Attach or hot-plug memory devices and start the domain.
    4. Perform test operations (qemu command line check, managedsave,
       save/restore, detach).
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is KiB; typical values are 4, 2048, 1048576, etc.
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)
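        # For 2048 KiB pages the call above is roughly equivalent to:
        #   mount -t hugetlbfs -o pagesize=2048K hugetlbfs /dev/hugepages
        # (illustrative only; the exact command is built by utils_misc.mount)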

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To set up hugepages

        :param page_size: unit is KiB; typical values are 4, 2048, 1048576, etc.
        :param shp_num: number of hugepages to allocate
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is KiB; typical values are 4, 2048, 1048576, etc.
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = r'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = r'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be brought online automatically by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check whether the configured memory values are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            if dom_mem[key] % 262144:  # 262144 KiB == 256 MiB
                logging.error('%s is not aligned to 256 MiB', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory is not aligned to 256 MiB')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
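    # Illustrative (assumed) shapes of the dict-valued parameters parsed above:
    #   huge_pages   entries like {'size': '2048', 'nodeset': '0'}
    #   numa_memnode entries like {'cellid': '0', 'mode': 'strict', 'nodeset': '0'}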
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        [x.update({'size': pg_size}) for x in huge_pages]
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so that the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if a memory device is going to be attached
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If an error is expected, only check the command error
                # status on the last attach iteration
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Consume about 1500M of guest memory
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait wait_before_save_secs seconds for the vm to be ready
            # before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait wait_before_save_secs seconds for the vm to be ready
            # before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is in use by the VM and
                            # could not be unplugged; this result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
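The memory-binding check in Esempio n. 12 uses read_from_numastat(), a helper defined elsewhere in the test module. A rough sketch of what such a helper might look like, assuming the numastat tool is installed and that its per-process output contains a line whose first column is the requested key (e.g. "Total"):

import subprocess


def read_from_numastat(pid, key):
    """Return the numeric columns of the numastat line starting with the given key."""
    output = subprocess.run(["numastat", "-p", str(pid)],
                            capture_output=True, text=True).stdout
    for line in output.splitlines():
        fields = line.split()
        if fields and fields[0] == key:
            return fields[1:]
    return []

The test only compares the last column, which numastat prints as the overall total across NUMA nodes.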
Esempio n. 13
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domstate operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    dump_path = os.path.join(test.tmpdir, "dump/")
    os.mkdir(dump_path)
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                panic_dev.addr_type = "isa"
                panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise exceptions.TestSkipError(
                    "No 'panic' device in the guest. Maybe your libvirt "
                    "version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                utils_misc.kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash the VM. The command will not
                # return because the VM crashes, so use a short timeout for
                # the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions need more
                # time to dump the core file or restart the OS, so use the
                # default session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except process.CmdError as detail:
            raise exceptions.TestError(
                "Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                raise exceptions.TestFail(
                    "Run successfully with wrong command!")
        else:
            if status or not output:
                raise exceptions.TestFail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the VM now; otherwise destroying a suspended
                    # VM later takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    raise exceptions.TestFail("Run failed with right command")
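Esempio n. 13 raises ActionError when the domstate output does not match the performed action; the class is defined elsewhere in the module. A minimal, assumed sketch of such an exception:

class ActionError(Exception):
    """Raised when 'virsh domstate --reason' does not match the VM action."""

    def __init__(self, action):
        super(ActionError, self).__init__(
            "domstate reason does not match the '%s' action" % action)
        self.action = action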
Esempio n. 14
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about the image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        if with_pool_vol:
            # Create a destination pool for creating the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iSCSI and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt,
                # but the volume format is not configurable and defaults
                # to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']
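                # newvol.xmltreefile now contains roughly:
                #   <volume><name>...</name>
                #     <capacity>1073741824</capacity>
                #     <allocation>1048576</allocation>
                #     <target><format type='<vol_format>'/></target>
                #   </volume>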

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                extra = "--driver qemu --type lun --rawio --persistent"
            else:
                extra = "--persistent --subdriver qcow2"

            # Set host_sestatus again, as the nfs pool setup may have reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
            if result.exit_status:
                raise error.TestNAError("Failed to set virt_use_nfs value")
        else:
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            utils_selinux.set_context_of_file(filename=img_path,
                                              context=img_label)
            extra = "--persistent"

        # Do the attach action.
        result = virsh.attach_disk(vm_name,
                                   source=img_path,
                                   target="vdf",
                                   extra=extra,
                                   debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        raise error.TestFail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " has the cap_sys_rawio capability"
                        logging.debug(inf_msg)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        try:
            virsh.detach_disk(vm_name,
                              target="vdf",
                              extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed." %
                                 vm.name)
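
A note on the capability check above: CAP_SYS_RAWIO is capability number 17, so its bitmask is 1 << 17 = 0x20000, which matches the hard-coded cap_rawio_val. A minimal standalone sketch of the same decoding, run here against the current process purely for illustration:

# CAP_SYS_RAWIO is capability bit 17 on Linux (0x20000).
CAP_SYS_RAWIO = 1 << 17

cap_dict = {}
with open("/proc/self/status") as status_file:
    for line in status_file:
        key, _, value = line.partition(":")
        if key in ("CapPrm", "CapEff", "CapBnd"):
            cap_dict[key] = int(value.strip(), 16)

for name, mask in cap_dict.items():
    print("%s=0x%x cap_sys_rawio=%s" % (name, mask, bool(mask & CAP_SYS_RAWIO)))
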
Esempio n. 15
0
def run(test, params, env):
    """
    Test seccomp_sandbox parameter in qemu.conf.

    1) Change seccomp_sandbox in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if qemu command line changed accordingly;
    """
    def get_qemu_command_sandbox_option(vm):
        """
        Get the sandbox option of qemu command line of a libvirt VM.

        :param vm: A libvirt_vm.VM class instance.
        :return :  A string containing '-sandbox' option of VM's qemu command
                   line or None if not found.
        """
        if vm.is_dead():
            vm.start()

        # Get qemu command line.
        pid = vm.get_pid()
        res = utils.run("ps -p %s -o cmd h" % pid)

        if res.exit_status == 0:
            match = re.search(r'-sandbox\s*(\S*)', res.stdout)
            if match:
                return match.groups()[0]

    vm_name = params.get("main_vm", "virt-tests-vm1")
    expected_result = params.get("expected_result", "not_set")
    seccomp_sandbox = params.get("seccomp_sandbox", "not_set")
    vm = env.get_vm(vm_name)

    # Get old qemu -sandbox option.
    orig_qemu_sandbox = get_qemu_command_sandbox_option(vm)
    logging.debug('Original "-sandbox" option of qemu command is '
                  '"%s".' % orig_qemu_sandbox)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if seccomp_sandbox == 'not_set':
            del config.seccomp_sandbox
        else:
            config.seccomp_sandbox = seccomp_sandbox

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started '
                                     'with seccomp_sandbox = '
                                     '%s' % seccomp_sandbox)
            return
        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started '
                                 'with seccomp_sandbox = '
                                 '%s' % seccomp_sandbox)

        # Restart VM to create a new qemu command line.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        # Get new qemu -sandbox option.
        new_qemu_sandbox = get_qemu_command_sandbox_option(vm)
        logging.debug('New "-sandbox" option of qemu command is '
                      '"%s"' % new_qemu_sandbox)

        if new_qemu_sandbox is None:
            if expected_result != 'not_set':
                raise error.TestFail('Qemu sandbox option is expected to be '
                                     'set, but none was found')
        else:
            if expected_result != new_qemu_sandbox:
                raise error.TestFail('Qemu sandbox option is expected to be '
                                     '%s, but %s found' % (
                                         expected_result, new_qemu_sandbox))
    finally:
        config.restore()
        libvirtd.restart()
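
A quick illustration of the regex used by get_qemu_command_sandbox_option(), run against a hypothetical qemu command line instead of live 'ps' output:

import re

# Hypothetical command line; only the '-sandbox <value>' part matters here.
cmdline = "/usr/libexec/qemu-kvm -name guest=demo -sandbox on -m 1024"
match = re.search(r'-sandbox\s*(\S*)', cmdline)
print(match.groups()[0] if match else None)   # prints: on
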
Esempio n. 16
0
def run(test, params, env):
    """
    Test set_process_name parameter in qemu.conf.

    1) Change set_process_name in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if qemu command line changed accordingly;
    """
    def get_qemu_command_name_option(vm):
        """
        Get the name option of qemu command line of a libvirt VM.

        :param vm: A libvirt_vm.VM class instance.
        :return :  A string containing '-name' option of VM's qemu command
                   line or None if not found.
        """
        if vm.is_dead():
            vm.start()

        # Get qemu command line.
        pid = vm.get_pid()
        res = process.run("ps -p %s -o cmd h" % pid, shell=True)

        if res.exit_status == 0:
            match = re.search(r'-name\s*(\S*)', res.stdout_text.strip())
            if match:
                return match.groups()[0]

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "name_not_set")
    set_process_name = params.get("set_process_name", "not_set")
    vm = env.get_vm(vm_name)

    # Get old qemu -name option.
    orig_qemu_name = get_qemu_command_name_option(vm)
    logging.debug('Original "-name" option of qemu command is '
                  '"%s".' % orig_qemu_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_process_name == 'not_set':
            del config.set_process_name
        else:
            config.set_process_name = set_process_name

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with set_process_name = '
                          '%s' % set_process_name)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with set_process_name = '
                      '%s' % set_process_name)

        # Restart VM to create a new qemu command line.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        # Get new qemu -name option.
        new_qemu_name = get_qemu_command_name_option(vm)
        logging.debug('New "-name" option of qemu command is '
                      '"%s"' % new_qemu_name)

        if ',process=qemu:%s' % vm_name in new_qemu_name:
            if expected_result == 'name_not_set':
                test.fail('Qemu name is not expected to be set, '
                          'but %s found' % new_qemu_name)
        else:
            if expected_result == 'name_set':
                test.fail('Qemu name is expected to be set, '
                          'but %s found' % new_qemu_name)
    finally:
        config.restore()
        libvirtd.restart()
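
The substring test above assumes that, with set_process_name enabled, libvirt appends ',process=qemu:<vm_name>' to the qemu '-name' option. A toy check using a hypothetical '-name' value:

vm_name = "avocado-vt-vm1"
# Hypothetical '-name' option value as it might appear on the qemu command line.
name_opt = "guest=avocado-vt-vm1,debug-threads=on,process=qemu:avocado-vt-vm1"
print(",process=qemu:%s" % vm_name in name_opt)   # True -> 'name_set' expected
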
Esempio n. 17
0
            file_cmd = "file %s" % dump_file
            (status, output) = commands.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
Esempio n. 18
0
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    check_bypass_timeout = int(params.get("check_bypass_timeout", "120"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def check_flag(file_flags):
        """
        Check if file flag include O_DIRECT.

        :param file_flags: The flags of dumped file

        Note, O_DIRECT(direct disk access hint) is defined as:
        on x86_64:
        #define O_DIRECT        00040000
        on ppc64le or aarch64:
        #define O_DIRECT        00200000
        """
        arch = platform.machine()
        file_flag_check = int('00040000', 16)
        if 'ppc64' in arch or 'aarch64' in arch:
            file_flag_check = int('00200000', 16)

        if int(file_flags, 16) & file_flag_check == file_flag_check:
            logging.info("File flags include O_DIRECT")
            return True
        else:
            logging.error("File flags doesn't include O_DIRECT")
            return False

    def check_bypass(dump_file, result_dict):
        """
        Get the file flags of domain core dump file and check it.
        """
        error = ''
        cmd1 = "lsof -w %s |awk '/libvirt_i/{print $2}'" % dump_file
        while True:
            if not os.path.exists(dump_file):
                time.sleep(0.05)
                continue
            ret = process.run(cmd1, shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                time.sleep(0.05)
                continue
            cmd2 = "cat /proc/%s/fdinfo/1 |grep flags|awk '{print $NF}'" % output
            ret = process.run(cmd2, allow_output_check='combined', shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                error = "Fail to get the flags of dumped file"
                logging.error(error)
                break
            if not len(output):
                continue
            try:
                logging.debug("The flag of dumped file: %s", output)
                if check_flag(output):
                    logging.info("Bypass file system cache "
                                 "successfully when dumping")
                    break
                else:
                    error = "Bypass file system cache fail when dumping"
                    logging.error(error)
                    break
            except (ValueError, IndexError) as detail:
                error = detail
                logging.error(error)
                break
        result_dict['bypass'] = error

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') >= 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf, the
        file should be a normal raw file, otherwise it should be compressed to
        the specified format; the supported compression formats are: lzop,
        gzip, bzip2, and xz.
        For memory-only dump, the default dump format is ELF, and the format
        can also be specified with the --format option, so the result could be
        'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format
               ) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            ret = process.run(file_cmd,
                              allow_output_check='combined',
                              shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Install lsof pkg if not installed
    if not utils_package.package_install("lsof"):
        test.cancel("Failed to install lsof in host\n")

    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        test.error("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("cmdline: %s" % result.stdout_text)
            if result.exit_status:
                test.fail("Not find dump-guest-core=%s in qemu cmdline" %
                          dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Deal with bypass-cache option
        if options.find('bypass-cache') >= 0:
            vm.wait_for_login()
            result_dict = multiprocessing.Manager().dict()
            child_process = multiprocessing.Process(target=check_bypass,
                                                    args=(dump_file,
                                                          result_dict))
            child_process.start()

        # Run virsh command
        cmd_result = virsh.dump(vm_name,
                                dump_file,
                                options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True,
                                debug=True)
        status = cmd_result.exit_status
        if 'child_process' in locals():
            child_process.join(timeout=check_bypass_timeout)
            params['bypass'] = result_dict['bypass']

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            test.fail("Domain status check fail.")
        if status_error:
            if not status:
                test.fail("Expect fail, but run successfully")
        else:
            if status:
                test.fail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                test.fail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                test.fail("The format of dumped file is wrong.")
        if params.get('bypass'):
            test.fail(params['bypass'])
    finally:
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
        if os.path.isfile(dump_file):
            os.remove(dump_file)
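
check_dump_format() keys off the second whitespace-separated token of 'file' output, e.g. "/path/dump: gzip compressed data, ...". A small self-contained sketch of that probe; the sample path is hypothetical and the file is created on the spot:

import gzip
import subprocess

sample = "/tmp/sample_dump.gz"            # hypothetical path, for illustration
with gzip.open(sample, "wb") as sample_file:
    sample_file.write(b"fake core data")

output = subprocess.run(["file", sample], stdout=subprocess.PIPE,
                        universal_newlines=True).stdout.strip()
actual_format = output.split(" ")[1]       # e.g. 'gzip'
print(actual_format.lower() == "gzip")     # True
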
Esempio n. 19
0
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC
    (3).Start VM
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        test.cancel("per-image DAC only supported on version 1.2.7"
                    " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create an image.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create an image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file to qemu:qemu and 660 mode
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 0o660)

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name,
                                     filearg=disk_xml,
                                     flagstr=option,
                                     debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    test.fail("The image dac label %s is not "
                              "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name,
                                    target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
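
check_ownership() and format_user_group_str() are not shown in this excerpt; the sketch below is only an assumed equivalent that compares a file's uid:gid pair with a resolved user/group name pair:

import grp
import os
import pwd


def ownership_str(path):
    """Return the 'uid:gid' string of a file, e.g. '107:107'."""
    st = os.stat(path)
    return "%d:%d" % (st.st_uid, st.st_gid)


def user_group_to_ids(user, group):
    """Resolve user/group names to a 'uid:gid' string."""
    return "%d:%d" % (pwd.getpwnam(user).pw_uid, grp.getgrnam(group).gr_gid)


print(ownership_str("/etc/hostname"))
print(user_group_to_ids("root", "root"))   # '0:0'
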
Esempio n. 20
0
def run(test, params, env):
    """
    Test auto_dump_* parameter in qemu.conf.

    1) Change auto_dump_* in qemu.conf;
    2) Restart libvirt daemon;
    3) Crash the guest to trigger an automatic core dump;
    4) Check if file open state changed accordingly.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    bypass_cache = params.get("auto_dump_bypass_cache", "not_set")
    panic_model = params.get("panic_model")
    addr_type = params.get("addr_type")
    addr_iobase = params.get("addr_iobase")
    vm = env.get_vm(vm_name)
    target_flags = int(params.get('target_flags', '0o40000'), 8)

    if panic_model and not libvirt_version.version_compare(1, 3, 1):
        test.cancel("panic device model attribute not supported"
                    "on current libvirt version")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    dump_path = os.path.join(data_dir.get_data_dir(), "dump")
    try:
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            # Set panic device
            panic_dev = Panic()
            if panic_model:
                panic_dev.model = panic_model
            if addr_type:
                panic_dev.addr_type = addr_type
            if addr_iobase:
                panic_dev.addr_iobase = addr_iobase
            vmxml.add_device(panic_dev)
        vmxml.on_crash = "coredump-restart"
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            test.cancel("No 'panic' device in the guest, maybe "
                        "your libvirt version doesn't support it")

        # Setup qemu.conf
        if bypass_cache == 'not_set':
            del config.auto_dump_bypass_cache
        else:
            config.auto_dump_bypass_cache = bypass_cache

        config.auto_dump_path = dump_path
        if os.path.exists(dump_path):
            os.rmdir(dump_path)
        os.mkdir(dump_path)

        # Restart libvirtd to make change valid.
        libvirtd.restart()

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        # Install lsof pkg if not installed
        if not utils_package.package_install("lsof"):
            test.cancel("Failed to install lsof in host\n")

        def get_flags(dump_path, result_dict):
            cmd = "lsof -w %s/* |awk '/libvirt_i/{print $2}'" % dump_path
            start_time = time.time()
            while (time.time() - start_time) < 30:
                ret = process.run(cmd, shell=True, ignore_status=True)
                status, iohelper_pid = ret.exit_status, ret.stdout_text.strip()
                if status:
                    time.sleep(0.1)
                    continue
                if not len(iohelper_pid):
                    continue
                else:
                    logging.info('pid: %s', iohelper_pid)
                    result_dict['pid'] = iohelper_pid
                    break

            # Get file open flags containing bypass cache information.
            with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo:
                flags = 0
                for line in fdinfo.readlines():
                    if line.startswith('flags:'):
                        flags = int(line.split()[1], 8)
                        logging.debug('file open flag is: %o', flags)
            result_dict['flags'] = flags
            with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo:
                cmdline = cmdinfo.readline()
                logging.debug(cmdline.split())

        session = vm.wait_for_login()
        result_dict = multiprocessing.Manager().dict()
        child_process = multiprocessing.Process(target=get_flags, args=(dump_path, result_dict))
        child_process.start()

        # Stop kdump in the guest
        session.cmd("service kdump stop", ignore_all_errors=True)
        # Enable sysRq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")
        try:
            # Crash the guest
            session.cmd("echo c > /proc/sysrq-trigger", timeout=1)
        except ShellTimeoutError:
            pass
        session.close()

        child_process.join(10)
        if child_process.is_alive():
            child_process.terminate()
        flags = result_dict['flags']
        iohelper_pid = result_dict['pid']

        # Kill core dump process to speed up test
        try:
            process.run('kill %s' % iohelper_pid)
        except process.CmdError as detail:
            logging.debug("Dump already done:\n%s", detail)

        arch = platform.machine()

        if arch in ['x86_64', 'ppc64le', 's390x']:
            # Check if bypass cache flag set or unset accordingly.
            cond1 = (flags & target_flags) and bypass_cache != '1'
            cond2 = not (flags & target_flags) and bypass_cache == '1'
            if cond1 or cond2:
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
        else:
            test.cancel("Unknown Arch. Do the necessary changes to"
                        " support")

    finally:
        backup_xml.sync()
        config.restore()
        libvirtd.restart()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
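
The flags value in /proc/<pid>/fdinfo is printed in octal, and on x86_64 O_DIRECT is 0o40000 (the default target_flags above). A minimal Linux-only sketch of the same bypass-cache check against a file this process opens itself (the path is hypothetical, and the open may fail on filesystems without O_DIRECT support, e.g. tmpfs):

import os

O_DIRECT_X86_64 = 0o40000                      # assumption: x86_64 value
fd = os.open("/var/tmp/odirect_probe",
             os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
try:
    with open("/proc/self/fdinfo/%d" % fd) as fdinfo:
        for line in fdinfo:
            if line.startswith("flags:"):
                flags = int(line.split()[1], 8)   # fdinfo prints flags in octal
                print(bool(flags & O_DIRECT_X86_64))   # True
finally:
    os.close(fd)
    os.remove("/var/tmp/odirect_probe")
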
Esempio n. 21
0
def run(test, params, env):
    """
    Test guest numa setting
    """
    def replace_qemu_cmdline(cmdline_list):
        """
        Replace the expected qemu command line for new machine type

        :param cmdline_list: The list for expected qemu command lines
        :return: The list contains the updated qemu command lines if any
        """
        os_xml = getattr(vmxml, "os")
        machine_ver = getattr(os_xml, 'machine')
        if (machine_ver.startswith("pc-q35-rhel")
                and machine_ver > 'pc-q35-rhel8.2.0'
                and libvirt_version.version_compare(6, 4, 0)):
            # Replace 'node,nodeid=0,cpus=0-1,mem=512' with
            # 'node,nodeid=0,cpus=0-1,memdev=ram-node0'
            # Replace 'node,nodeid=1,cpus=2-3,mem=512' with
            # 'node,nodeid=1,cpus=2-3,memdev=ram-node1'
            for cmd in cmdline_list:
                line = cmd['cmdline']
                try:
                    node = line.split(',')[1][-1]
                    cmd['cmdline'] = line.replace(
                        'mem=512', 'memdev=ram-node{}'.format(node))
                # We can skip replacing, when the cmdline parameter is empty.
                except IndexError:
                    pass

        return cmdline_list

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    dynamic_node_replacement(params, host_numa_node, test)
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    expect_cpus = params.get('expect_cpus')
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    cpu_num = cpu.get_cpu_info().get('CPU(s)')
    if vcpu_num > int(cpu_num):
        test.cancel('Number of vcpus(%s) is larger than number of '
                    'cpus on host(%s).' % (vcpu_num, cpu_num))

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, since runtime updates
            # of the total 1G hugepage count are not supported.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # set hugepages per node if the current value is not enough;
                # per-node 1G hugepage runtime updates are supported by the kernel
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = vmcpuxml.dicts_to_cells(numa_cell)
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += r"N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        cmdline_list = replace_qemu_cmdline(cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if i == 0 and expect_cpus:
                cpu_list = cpu.cpus_parser(expect_cpus)
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
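
The qemu command line check above relies on /proc/<pid>/cmdline being NUL-separated, with each expected pattern matched via re.search(). Applied to the current Python process purely for illustration:

import os
import re

with open("/proc/%d/cmdline" % os.getpid()) as cmdline_file:
    argv = cmdline_file.read().split("\x00")

expected_pattern = r"python"      # hypothetical pattern, illustration only
print(any(re.search(expected_pattern, arg) for arg in argv))
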
Esempio n. 22
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {'model': sec_model, 'label': img_label, 'relabel': sec_relabel}
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes through
                # libvirt; a logical pool can create volumes through libvirt,
                # but the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" % qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # set host_sestatus as nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path, context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path},
                                                      "seclabels": source_seclabel})
        else:
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path}})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml.xml, flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set.
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lacks the cap_sys_rawio capability"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " has the cap_sys_rawio capability"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1, 0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s", astring.to_text(output.strip().split()[-2]))
                if default_label not in astring.to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name, filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
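
The label check above parses 'ls -Z' output; an alternative, shown here only as a sketch, is to read the security.selinux extended attribute directly (works only on SELinux-enabled hosts):

import os

label = os.getxattr("/etc/hostname", "security.selinux")
# e.g. b'system_u:object_r:hostname_etc_t:s0\x00'
print(label.decode().rstrip("\x00"))
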
Esempio n. 23
0
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    qemu_no_usr_grp = "yes" == params.get("qemu_no_usr_grp", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in list(disks.values()):
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write mode to disk by chmod g+w
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        process.run(cmd, ignore_status=False, shell=True)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake", test):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake", test):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(qemu_user, qemu_group)

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        test.fail("Label of VM processs is not "
                                  "expected after starting.\nDetail:"
                                  "vm_context=%s, sec_label_trans=%s"
                                  % (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                test.fail("Label of disk is not " +
                                          "expected" +
                                          " after VM starting.\n" +
                                          "Detail: disk_context" +
                                          "=%s" % disk_context +
                                          ", sec_label_trans=%s."
                                          % sec_label_trans)
            elif (set_qemu_conf and not security_default_confined and not
                  qemu_no_usr_grp):
                if vm_context != qemu_conf_label_trans:
                    test.fail("Label of VM process is not expected"
                              " after starting.\nDetail: vm_context="
                              "%s, qemu_conf_label_trans=%s"
                              % (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        test.fail("Label of disk is not expected " +
                                  "after VM starting.\nDetail: di" +
                                  "sk_context=%s, " % disk_context +
                                  "qemu_conf_label_trans=%s." %
                                  qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                if libvirt_version.version_compare(1, 3, 5):
                    chk_str = "-name guest=%s,process=qemu:%s" % (vm_name, vm_name)
                else:
                    chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = process.run(cmd, shell=True)
                if chk_str in result.stdout_text:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout_text))
                else:
                    test.fail("%s not in vm process command: %s" %
                              (chk_str, result.stdout_text))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # With dynamic_ownership set to 1 on a non-shared fs, the
                # domain image is restored to 0:0 when sec_relabel is enabled.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
            elif set_qemu_conf and not set_sec_label:
                # With dynamic_ownership set to 1 on a non-shared fs, the
                # domain image is restored to 0:0 when only qemu.conf is set.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
                else:
                    if (not img_label_after == img_label):
                        test.fail("Bug: Label of disk is changed\n"
                                  "Detail: img_label_after=%s, "
                                  "img_label=%s.\n"
                                  % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start failed as expected, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf and not qemu_no_usr_grp:
                                if qemu_conf_label_trans == "107:107":
                                    logging.debug(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                logging.debug(err_msg)
                else:
                    test.fail("Test failed in positive case."
                              "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = process.run(cmd, ignore_status=True, shell=True)
        utils_selinux.set_status(backup_sestatus)
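
The format_user_group_str() helper used for the "uid:gid" comparisons above is not part of this listing; a minimal sketch, assuming it resolves user and group names through the pwd and grp modules and passes numeric IDs through unchanged:

import grp
import pwd


def format_user_group_str(user, group):
    """Hypothetical reimplementation: convert a user/group pair into the
    "uid:gid" string form used in the comparisons above."""
    uid = user if user.isdigit() else str(pwd.getpwnam(user).pw_uid)
    gid = group if group.isdigit() else str(grp.getgrnam(group).gr_gid)
    return "%s:%s" % (uid, gid)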
Example no. 24
def run(test, params, env):
    """
    Test DAC in save/restore domain to nfs pool.

    (1) Init variables for test.
    (2) Create nfs pool.
    (3) Start VM and check result.
    (4) Save domain to the nfs pool.
    (5) Restore domain from the nfs file.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_save_restore_host_selinux",
                               "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool file variables
    pre_file = "yes" == params.get("pre_file", "yes")
    pre_file_name = params.get("pre_file_name", "dac_nfs_file")
    file_tup = ("file_user", "file_group", "file_mode")
    file_val = []
    for i in file_tup:
        try:
            file_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    file_user, file_group, file_mode = file_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown the domain disks to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for save/restore
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     emulated_image,
                     image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # Set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Create a file on nfs server dir.
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        server_file_path = os.path.join(nfs_path, pre_file_name)
        if pre_file and not os.path.exists(server_file_path):
            open(server_file_path, 'a').close()
        if not pre_file and os.path.exists(server_file_path):
            raise error.TestNAError("File %s already exist in pool %s" %
                                    (server_file_path, pool_name))

        # Get nfs mount file path
        mnt_path = os.path.join(tmp_dir, pool_target)
        mnt_file_path = os.path.join(mnt_path, pre_file_name)

        # Change img ownership and mode on nfs server dir
        if pre_file:
            os.chown(server_file_path, file_user, file_group)
            os.chmod(server_file_path, file_mode)

        # Start VM.
        try:
            vm.start()
            # Start VM successfully.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            raise error.TestFail("Domain failed to start. " "error: %s" % e)

        label_before = check_ownership(server_file_path)
        if label_before:
            logging.debug("file ownership on nfs server before save: %s" %
                          label_before)

        # Save domain to nfs pool file
        save_re = virsh.save(vm_name, mnt_file_path, debug=True)
        if save_re.exit_status:
            if not status_error:
                raise error.TestFail("Failed to save domain to nfs pool file.")
        else:
            if status_error:
                raise error.TestFail("Save domain to nfs pool file succeeded, "
                                     "expected Fail.")

        label_after = check_ownership(server_file_path)
        if label_after:
            logging.debug("file ownership on nfs server after save: %s" %
                          label_after)

        # Restore domain from the nfs pool file
        if not save_re.exit_status:
            restore_re = virsh.restore(mnt_file_path, debug=True)
            if restore_re.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to restore domain from nfs "
                                         "pool file.")
            else:
                if status_error:
                    raise error.TestFail("Restore domain from nfs pool file "
                                         "succeeded, expected Fail.")

            label_after_rs = check_ownership(server_file_path)
            if label_after_rs:
                logging.debug(
                    "file ownership on nfs server after restore: %s" %
                    label_after_rs)
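
The check_ownership() helper called above is also not shown in this listing; a minimal sketch, assuming it stats the file and returns its ownership as a "uid:gid" string, or None when the file does not exist:

import os


def check_ownership(path):
    """Hypothetical reimplementation: return "uid:gid" of path, or None
    if the path is missing (e.g. not present on the NFS server dir)."""
    if not os.path.exists(path):
        return None
    st = os.stat(path)
    return "%s:%s" % (st.st_uid, st.st_gid)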
Example no. 25
class EnvState(object):
    """
    Prepare environment state for test according to input parameters,
    and recover it after test.
    """
    sockets = []
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_config = utils_config.LibvirtQemuConfig()
    spice_x509_dir_real = ""
    spice_x509_dir_bak = ""
    vnc_x509_dir_real = ""
    vnc_x509_dir_bak = ""

    def _backup_dir(self, path):
        """
        Back up the original libvirt SPICE or VNC x509 certificate directory.
        """
        if os.path.isdir(path):
            backup_path = path + '.bak'
            if os.path.isdir(backup_path):
                shutil.rmtree(backup_path)
            shutil.move(path, backup_path)
            return backup_path
        else:
            return ""

    def _restore_dir(self, path, backup_path):
        """
        Restore the original libvirt SPICE or VNC x509 certificate directory
        from backup_path.
        """
        if os.path.isdir(path):
            shutil.rmtree(path)
        if backup_path:
            shutil.move(backup_path, path)

    def __init__(self, params, expected_result):
        spice_tls = params.get("spice_tls", "not_set")
        spice_listen = params.get("spice_listen", "not_set")
        vnc_tls = params.get("vnc_tls", "not_set")
        vnc_listen = params.get("vnc_listen", "not_set")
        spice_x509_dir = params.get("spice_x509_dir", "not_set")
        vnc_x509_dir = params.get("vnc_x509_dir", "not_set")
        spice_prepare_cert = params.get("spice_prepare_cert", "yes")
        vnc_prepare_cert = params.get("vnc_prepare_cert", "yes")
        port_min = params.get("remote_display_port_min", 'not_set')
        port_max = params.get("remote_display_port_max", 'not_set')
        auto_unix_socket = params.get("vnc_auto_unix_socket", 'not_set')
        tls_x509_verify = params.get("vnc_tls_x509_verify", 'not_set')

        if spice_x509_dir == 'not_set':
            self.spice_x509_dir_real = '/etc/pki/libvirt-spice'
        else:
            self.spice_x509_dir_real = spice_x509_dir

        self.spice_x509_dir_bak = self._backup_dir(self.spice_x509_dir_real)
        if spice_prepare_cert == 'yes':
            utils_misc.create_x509_dir(
                self.spice_x509_dir_real,
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test',
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test', 'none', True)

        if vnc_x509_dir == 'not_set':
            self.vnc_x509_dir_real = '/etc/pki/libvirt-vnc'
        else:
            self.vnc_x509_dir_real = vnc_x509_dir

        self.vnc_x509_dir_bak = self._backup_dir(self.vnc_x509_dir_real)
        if vnc_prepare_cert == 'yes':
            utils_misc.create_x509_dir(
                self.vnc_x509_dir_real,
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test',
                '/C=NC/L=Raleigh/O=Red Hat/CN=virt-test', 'none', True)

        if spice_x509_dir == 'not_set':
            del self.qemu_config.spice_tls_x509_cert_dir
        else:
            self.qemu_config.spice_tls_x509_cert_dir = spice_x509_dir

        if vnc_x509_dir == 'not_set':
            del self.qemu_config.vnc_tls_x509_cert_dir
        else:
            self.qemu_config.vnc_tls_x509_cert_dir = vnc_x509_dir

        if spice_tls == 'not_set':
            del self.qemu_config.spice_tls
        else:
            self.qemu_config.spice_tls = spice_tls

        if vnc_tls == 'not_set':
            del self.qemu_config.vnc_tls
        else:
            self.qemu_config.vnc_tls = vnc_tls

        if port_min == 'not_set':
            del self.qemu_config.remote_display_port_min
        else:
            self.qemu_config.remote_display_port_min = port_min

        if port_max == 'not_set':
            del self.qemu_config.remote_display_port_max
        else:
            self.qemu_config.remote_display_port_max = port_max

        if spice_listen == 'not_set':
            del self.qemu_config.spice_listen
        elif spice_listen in ['valid_ipv4', 'valid_ipv6']:
            expected_ip = str(expected_result['spice_ips'][0])
            self.qemu_config.spice_listen = expected_ip
        else:
            self.qemu_config.spice_listen = spice_listen

        if auto_unix_socket == 'not_set':
            del self.qemu_config.vnc_auto_unix_socket
        else:
            self.qemu_config.vnc_auto_unix_socket = auto_unix_socket

        if tls_x509_verify == 'not_set':
            del self.qemu_config.vnc_tls_x509_verify
        else:
            self.qemu_config.vnc_tls_x509_verify = tls_x509_verify

        if vnc_listen == 'not_set':
            del self.qemu_config.vnc_listen
        elif vnc_listen in ['valid_ipv4', 'valid_ipv6']:
            expected_ip = str(expected_result['vnc_ips'][0])
            self.qemu_config.vnc_listen = expected_ip
        else:
            self.qemu_config.vnc_listen = vnc_listen

        self.libvirtd.restart()

    def restore(self):
        """
        Recover environment state after test.
        """
        self._restore_dir(self.spice_x509_dir_real, self.spice_x509_dir_bak)
        self._restore_dir(self.vnc_x509_dir_real, self.vnc_x509_dir_bak)
        self.qemu_config.restore()
        self.libvirtd.restart()
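
A minimal usage sketch of EnvState, assuming a params dictionary and an expected_result mapping that already carries 'spice_ips'/'vnc_ips' entries; the important part is that restore() runs in the cleanup path even when the test body fails:

env_state = EnvState(params, expected_result)
try:
    # start the VM and verify the SPICE/VNC listen addresses and ports here
    pass
finally:
    env_state.restore()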
Example no. 26
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used, because qemu-guest-agent needs
    to be installed in the guest.

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the memspec option does not contain "file=", we only need to
        # prepend the test tmpdir to it.
        if mem_options is None:
            mem_options = os.path.join(test.tmpdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.tmpdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(test.tmpdir,
                                         params.get(external_disk))
                utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
        # Only chmod the last external disk for the negative case
        if dac_denial:
            utils.run("chmod 500 %s" % disk_path)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if qemu-ga was killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        raise error.TestNAError("Fail to stop agent in "
                                                "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot when --print-xml is not
        # used and multiple disks are specified in the cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
                virsh.attach_disk(vm_name,
                                  disk_path,
                                  'vd%s' % list(string.lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name,
                                                   options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name,
                                                      options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # For multiple snapshots without a specific snapshot name,
                # the snapshot name is a timestamp with one-second
                # resolution; sleep slightly over a second here to avoid
                # creating two snapshots with the same name.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail(
                        "Run successfully with wrong command!")
                else:
                    # Check that the memspec file was removed after the failure
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail(
                                "Run failed but file %s exist" %
                                option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse-external fails
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            raise error.TestFail("Domain xml should not be "
                                                 "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s" %
                                         output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    raise error.TestFail("'%s' was found: %s" %
                                                         (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # remove attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(test.tmpdir,
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
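
For orientation, a sketch of what the options string composed in the loop above can end up looking like for an external, disk-only snapshot; the snapshot name, disk target and file path are illustrative only:

options = ("snap1 --description testsnap --disk-only "
           "--diskspec vda,snapshot=external,file=/var/tmp/snap1-vda.qcow2")
result = virsh.snapshot_create_as(vm_name, options,
                                  ignore_status=True, debug=True)
libvirt.check_exit_status(result)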
Example no. 27
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': vm_sec_model,
        'relabel': vm_sec_relabel
    }
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {
        'sec_model': per_sec_model,
        'relabel': relabel,
        'sec_label': sec_label
    }
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug(
                        "The guest failed to start as expected,"
                        "details see bug: bugzilla.redhat.com/show_bug.cgi"
                        "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1,
                                                   0) and disk_type == "block":
                    output = astring.to_text(
                        process.system_output(
                            "nsenter -t %d -m -- ls -l %s" %
                            (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
Example no. 28
def run(test, params, env):
    """
    Test clear_emulator_capabilities parameter in qemu.conf.

    1) Change clear_emulator_capabilities in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if qemu process capabilities changed accordingly;
    """
    def get_qemu_process_caps(vm):
        """
        Get the capabilities sets of qemu process of a libvirt VM from proc.

        The raw format of capabilities sets in /proc/${PID}/status is:

        set_name   cap_str
          |           |
          V           V
        CapInh: 0000000000000000
        CapPrm: 0000001fffffffff
        CapEff: 0000001fffffffff
        CapBnd: 0000001fffffffff
        CapAmb: 0000000000000000

        :param vm: A libvirt_vm.VM class instance.
        :return: A dict using set_name as keys and integers converted from
                 hex cap_str as values.
        :raise: test.cancel if capabilities cannot be read from proc.
        """
        if vm.is_dead():
            vm.start()

        pid = vm.get_pid()
        proc_stat_fp = open('/proc/%s/status' % pid)
        caps = {}
        try:
            for line in proc_stat_fp:
                if line.startswith('Cap'):
                    set_name, cap_str = line.split(':\t')
                    caps[set_name] = int(cap_str, 16)
        finally:
            proc_stat_fp.close()

        if len(caps) == 5:
            return caps
        else:
            test.cancel('Host does not support capabilities or '
                        'an error has occurred.')

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "name_not_set")
    user = params.get("qemu_user", "not_set")
    clear_emulator_capabilities = params.get("clear_emulator_capabilities",
                                             "not_set")
    vm = env.get_vm(vm_name)

    # Get old qemu process cap sets.
    orig_qemu_caps = get_qemu_process_caps(vm)
    logging.debug('Original capability sets of the qemu process are: ')
    for s in orig_qemu_caps:
        cap_int = orig_qemu_caps[s]
        logging.debug('%s: %s(%s)' % (s, hex(cap_int), cap_int))

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if user == 'not_set':
            del config.user
        else:
            config.user = user

        if clear_emulator_capabilities == 'not_set':
            del config.clear_emulator_capabilities
        else:
            config.clear_emulator_capabilities = clear_emulator_capabilities

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with clear_emulator_capabilities = '
                          '%s' % clear_emulator_capabilities)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with clear_emulator_capabilities = '
                      '%s' % clear_emulator_capabilities)

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        # Get new qemu process cap sets
        new_qemu_caps = get_qemu_process_caps(vm)
        logging.debug('New capability sets of the qemu process are: ')
        for s in new_qemu_caps:
            cap_int = new_qemu_caps[s]
            logging.debug('%s: %s(%s)' % (s, hex(cap_int), cap_int))

        eff_caps = new_qemu_caps['CapEff']
        if eff_caps == 0:
            if expected_result != 'dropped':
                test.fail(
                    'Qemu process capabilities are not expected to be dropped, '
                    'but CapEff == %s was found' % hex(eff_caps))
        else:
            if expected_result == 'dropped':
                test.fail(
                    'Qemu process capabilities are expected to be dropped, '
                    'but CapEff == %s was found' % hex(eff_caps))
    finally:
        config.restore()
        libvirtd.restart()
Example no. 29
def run(test, params, env):
    """
    Test auto_dump_* parameter in qemu.conf.

    1) Change auto_dump_* in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if file open state changed accordingly.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    bypass_cache = params.get("auto_dump_bypass_cache", "not_set")
    panic_model = params.get("panic_model")
    addr_type = params.get("addr_type")
    addr_iobase = params.get("addr_iobase")
    vm = env.get_vm(vm_name)

    if panic_model and not libvirt_version.version_compare(1, 3, 1):
        test.cancel("panic device model attribute not supported"
                    "on current libvirt version")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump")
    try:
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            # Set panic device
            panic_dev = Panic()
            if panic_model:
                panic_dev.model = panic_model
            if addr_type:
                panic_dev.addr_type = addr_type
            if addr_iobase:
                panic_dev.addr_iobase = addr_iobase
            vmxml.add_device(panic_dev)
        vmxml.on_crash = "coredump-restart"
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            test.cancel("No 'panic' device in the guest, maybe "
                        "your libvirt version doesn't support it")

        # Setup qemu.conf
        if bypass_cache == 'not_set':
            del config.auto_dump_bypass_cache
        else:
            config.auto_dump_bypass_cache = bypass_cache

        config.auto_dump_path = dump_path
        if os.path.exists(dump_path):
            os.rmdir(dump_path)
        os.mkdir(dump_path)

        # Restart libvirtd to make change valid.
        libvirtd.restart()

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        session = vm.wait_for_login()
        # Stop kdump in the guest
        session.cmd("service kdump stop", ignore_all_errors=True)
        # Enable sysRq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")
        try:
            # Crash the guest
            session.cmd("echo c > /proc/sysrq-trigger", timeout=1)
        except ShellTimeoutError:
            pass
        session.close()

        iohelper_pid = process.run('pgrep -f %s' %
                                   dump_path).stdout_text.strip()
        logging.error('%s', iohelper_pid)

        # Get file open flags containing bypass cache information.
        with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo:
            flags = 0
            for line in fdinfo.readlines():
                if line.startswith('flags:'):
                    flags = int(line.split()[1], 8)
                    logging.debug('file open flag is: %o', flags)

        with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo:
            cmdline = cmdinfo.readline()
            logging.debug(cmdline.split())

        # Kill core dump process to speed up test
        try:
            process.run('kill %s' % iohelper_pid)
        except process.CmdError as detail:
            logging.debug("Dump already done:\n%s", detail)

        arch = platform.machine()

        if arch == 'x86_64':
            # Check if bypass cache flag set or unset accordingly.
            if (flags & 0o40000) and bypass_cache != '1':
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
            if not (flags & 0o40000) and bypass_cache == '1':
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
        elif arch == 'ppc64le':
            # Check if bypass cache flag set or unset accordingly.
            if (flags & 0o400000) and bypass_cache != '1':
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
            if not (flags & 0o400000) and bypass_cache == '1':
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
        else:
            test.cancel("Unknown Arch. Do the necessary changes to" "support")

    finally:
        backup_xml.sync()
        config.restore()
        libvirtd.restart()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
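
The hard-coded octal masks above are the per-architecture values of the O_DIRECT open flag; a sketch of an architecture-independent variant of the same check, assuming os.O_DIRECT is available on the test host (it is on Linux builds of Python):

import os

o_direct = getattr(os, 'O_DIRECT', 0o40000)  # fall back to the x86_64 value
if (flags & o_direct) and bypass_cache != '1':
    test.fail('auto_dump_bypass_cache is %s but flags is %o'
              % (bypass_cache, flags))
if not (flags & o_direct) and bypass_cache == '1':
    test.fail('auto_dump_bypass_cache is %s but flags is %o'
              % (bypass_cache, flags))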
Example no. 30
def run(test, params, env):
    """
    Test auto_dump_* parameter in qemu.conf.

    1) Change auto_dump_* in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if file open state changed accordingly.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    bypass_cache = params.get("auto_dump_bypass_cache", "not_set")
    vm = env.get_vm(vm_name)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            # Set panic device
            panic_dev = Panic()
            panic_dev.addr_type = "isa"
            panic_dev.addr_iobase = "0x505"
            vmxml.add_device(panic_dev)
        vmxml.on_crash = "coredump-restart"
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            raise error.TestNAError("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it")

        # Setup qemu.conf
        if bypass_cache == 'not_set':
            del config.auto_dump_bypass_cache
        else:
            config.auto_dump_bypass_cache = bypass_cache

        dump_path = os.path.join(test.tmpdir, "dump")
        config.auto_dump_path = dump_path
        os.mkdir(dump_path)

        # Restart libvirtd to make change valid.
        libvirtd.restart()

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        session = vm.wait_for_login()
        # Stop kdump in the guest
        session.cmd("service kdump stop", ignore_all_errors=True)
        # Enable sysRq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")
        try:
            # Crash the guest
            session.cmd("echo c > /proc/sysrq-trigger", timeout=1)
        except ShellTimeoutError:
            pass
        session.close()

        iohelper_pid = utils.run('pgrep -f %s' % dump_path).stdout.strip()
        logging.error('%s', iohelper_pid)

        # Get file open flags containing bypass cache information.
        fdinfo = open('/proc/%s/fdinfo/1' % iohelper_pid, 'r')
        flags = 0
        for line in fdinfo.readlines():
            if line.startswith('flags:'):
                flags = int(line.split()[1], 8)
                logging.debug('File open flag is: %o', flags)
        fdinfo.close()

        cmdline = open('/proc/%s/cmdline' % iohelper_pid).readline()
        logging.debug(cmdline.split())

        # Kill core dump process to speed up test
        utils.run('kill %s' % iohelper_pid)

        # Check if bypass cache flag set or unset accordingly.
        if (flags & 040000) and bypass_cache != '1':
            raise error.TestFail('auto_dump_bypass_cache is %s but flags '
                                 'is %o' % (bypass_cache, flags))
        if not (flags & 040000) and bypass_cache == '1':
            raise error.TestFail('auto_dump_bypass_cache is %s but flags '
                                 'is %o' % (bypass_cache, flags))
    finally:
        backup_xml.sync()
        config.restore()
        libvirtd.restart()