def run(test, params, env):
    """
    Test virt-admin srv-threadpool-info

    1) Change the threadpool related parameters in libvirtd.conf;
    2) Restart libvirtd daemon;
    3) Check whether the parameter value listed by srv-threadpool-info
       are the same with the above settings.
    """
    min_workers = params.get("min_workers")
    max_workers = params.get("max_workers")
    prio_workers = params.get("prio_workers")
    admin_min_workers = params.get("admin_min_workers")
    admin_max_workers = params.get("admin_max_workers")
    server_name = params.get("server_name")

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        if server_name == "libvirtd":
            config.min_workers = min_workers
            config.max_workers = max_workers
            config.prio_workers = prio_workers
        elif server_name == "admin":
            config.admin_min_workers = admin_min_workers
            config.admin_max_workers = admin_max_workers

        libvirtd.restart()
        vp = virt_admin.VirtadminPersistent()
        result = vp.srv_threadpool_info(server_name,
                                        ignore_status=True,
                                        debug=True)

        output = result.stdout.strip().splitlines()
        out_split = [item.split(':') for item in output]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])

        if result.exit_status:
            raise exceptions.TestFail("This operation should success "
                                      "but failed! Output: \n %s" % result)
        else:
            if server_name == "libvirtd":
                if not (out_dict["minWorkers"] == min_workers
                        and out_dict["maxWorkers"] == max_workers
                        and out_dict["prioWorkers"] == prio_workers):
                    raise exceptions.TestFail(
                        "attributes info listed by "
                        "srv-threadpool-info is not correct!")
            elif server_name == "admin":
                if not (out_dict["minWorkers"] == admin_min_workers
                        and out_dict["maxWorkers"] == admin_max_workers):
                    raise exceptions.TestFail(
                        "attributes info listed by "
                        "srv-threadpool-info is not correct!")
    finally:
        config.restore()
        libvirtd.restart()
Exemplo n.º 2
0
    def test_accessers(self):
        config_file = tempfile.NamedTemporaryFile()
        config_path = config_file.name
        config_file.close()
        try:
            config_file = open(config_path, 'w')
            config_file.write('')
            config_file.close()
            config = utils_config.LibvirtdConfig(path=config_path)

            # Test internal property.
            self.assertEqual(config.conf_path, config_path)

            # Test undefined property.
            try:
                config.undefined_property
            except Exception as e:
                self.assertEqual(
                    utils_config.LibvirtConfigUnknownKeyError, e.__class__)
                self.assertTrue('Unknown config key' in str(e))

            # Test defined boolean property.
            self.assertEqual(config.listen_tls, None)
            config.listen_tls = 1
            self.assertEqual(config.get_raw('listen_tls'), '1')
            self.assertEqual(config.listen_tls, 1)
            config.listen_tls = False
            self.assertEqual(config.get_raw('listen_tls'), '0')
            self.assertEqual(config.listen_tls, 0)
            config.listen_tls = "1"
            self.assertEqual(config.get_raw('listen_tls'), '1')
            config.listen_tls = "undefined"
            self.assertEqual(config.get_raw('listen_tls'), 'undefined')
            del config.listen_tls
            self.assertEqual(config.listen_tls, None)

            # Test defined string property.
            self.assertEqual(config.host_uuid, None)
            config.host_uuid = 1
            self.assertEqual(config.get_raw('host_uuid'), '"1"')
            config.host_uuid = 'a'
            self.assertEqual(config.get_raw('host_uuid'), '"a"')

            # Test defined integer property.
            self.assertEqual(config.max_clients, None)
            config.max_clients = 1
            self.assertEqual(config.get_raw('max_clients'), '1')

            # Test defined list property.
            self.assertEqual(config.access_drivers, None)
            config.access_drivers = [1, "a"]
            self.assertEqual(
                config.get_raw('access_drivers'), '["1", "a"]')
        finally:
            os.remove(config_path)
def config_libvirt(params):
    """
    Configure /etc/libvirt/libvirtd.conf
    """
    libvirtd_conf = utils_config.LibvirtdConfig()

    for k, v in params.items():
        libvirtd_conf[k] = v

    logging.debug("The libvirtd config file content is:\n%s" % libvirtd_conf)

    return libvirtd_conf
def run(test, params, env):
    """
    Test virt-admin srv-clients-info

    1) Change the clients related parameters in libvirtd.conf;
    2) Restart libvirtd daemon;
    3) Start several virsh connections;
    4) Check whether the parameters value listed by srv-clents-info
       are the same with the above settings.
    """
    max_clients = params.get("max_clients")
    max_anonymous_clients = params.get("max_anonymous_clients")
    server_name = params.get("server_name")
    num_clients = params.get("num_clients")

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        config.max_clients = max_clients
        config.max_anonymous_clients = max_anonymous_clients
        libvirtd.restart()
        vp = virt_admin.VirtadminPersistent()

        virsh_instant = []
        for _ in range(int(num_clients)):
            virsh_instant.append(virsh.VirshPersistent(uri="qemu:///system"))

        result = vp.srv_clients_info(server_name,
                                     ignore_status=True,
                                     debug=True)
        output = result.stdout.strip().splitlines()
        out_split = [item.split(':') for item in output]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])

        if result.exit_status:
            raise exceptions.TestFail("This operation should success "
                                      "but failed. Output:\n %s" % result)
        else:
            if not (out_dict["nclients_max"] == max_clients and
                    out_dict["nclients_unauth_max"] == max_anonymous_clients):
                raise exceptions.TestFail("attributes info listed by "
                                          "srv-clients-info is not correct.")
            if not out_dict["nclients"] == num_clients:
                raise exceptions.TestFail(
                    "the number of clients connect to libvirtd "
                    "is not correct.")
    finally:
        config.restore()
        libvirtd.restart()
Exemplo n.º 5
0
def run(test, params, env):
    """
    Check libvirtd log after some operations about the interface
    1. Configure the libvirtd log;
    2. Do the operations about interface;
    3. Check the libvirtd log for errors;
    4. Clear the env;
    """
    name_1 = params.get("name_1")
    name_2 = params.get("name_2")
    config_libvirtd = "yes" == params.get("config_libvirtd")
    log_file = params.get("log_file", "libvirtd.log")
    iface_name = utils_net.get_net_if(state="UP")[0]

    try:
        # config libvirtd
        if config_libvirtd:
            config = utils_config.LibvirtdConfig()
            log_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            log_outputs = "1:file:%s" % log_path
            config.log_outputs = log_outputs
            config.log_level = 1
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        process.run(
            "ip l add link {0} name {1} type macvlan; ip l set {1} name {2}".
            format(iface_name, name_1, name_2),
            ignore_status=True,
            shell=True)
        logging.debug("Check the log, there should be no error")
        time.sleep(5)
        check_cmd = "grep -i error %s" % log_path
        out = process.run(check_cmd, ignore_status=True,
                          shell=True).stdout_text.strip()
        logging.debug("the log error is %s", out)
        if 'virFileReadAll' in out or "virNetDevGetLinkInfo" in out:
            if libvirt_version.version_compare(6, 3, 0):
                test.fail("libvirtd.log get error: %s" % out)
            else:
                test.fail("the bug is not fixed on this libvirt version")

    finally:
        process.run("ip l delete %s; ip l delete %s" % (name_2, name_1),
                    ignore_status=True,
                    shell=True)
        if config_libvirtd:
            config.restore()
            libvirtd.restart()
Exemplo n.º 6
0
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            test.fail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        ret = process.run(cmd, shell=True)
        cpu_num = int(ret.stdout_text.split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num + 1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            test.fail("vcpuinfo cpu affinity not expected")

    def numa_mode_check(mode_nodeset):
        """
        when the mode = 'preferred' or 'interleave', it is better to check
        numa_maps.
        """
        vm_pid = vm.get_pid()
        numa_map = '/proc/%s/numa_maps' % vm_pid
        # Open a file
        with open(numa_map) as file:
            for line in file.readlines():
                if line.split()[1] != mode_nodeset:
                    test.fail("numa node and nodeset %s is "
                              "not expected" % mode_nodeset)

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[nnode])]) + ','
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd start as session use root, need stop virtlogd service
        # and start it as daemon to fix selinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True)
            process.run("virtlogd -d")
        except path.CmdNotFoundError:
            pass

        # Allow for more times to libvirtd restarted successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = node_list[:nodes_size]

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose the random cpus from the list of available CPUs on the system and make sure no cpu is
                    # added twice or the list of selected CPUs is not long enough
                    for i in range(
                            len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list),
                                                      max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join(
                        [str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [
                    node_list.index(i) for i in node_list if i not in used_node
                ]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(used_node, left_node)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get(
                    'nodeset')
                numa_mode_check(mode_nodeset)
        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                test.fail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                logging.warning('numa log:\n%s\n' %
                                numad_log[0].split("numad ")[-1])
                logging.warning('numa cmd opt:\n%s\n' % numad_cmd_opt)
                test.fail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = cpu.cpus_parser(numad_ret)
            left_node = [
                node_list.index(i) for i in node_list if i not in numad_node
            ]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                if numa_memory.get('mode') == 'strict':
                    mem_compare(numad_node_seq, left_node)
                elif numa_memory.get('mode') == 'preferred':
                    mode_nodeset = 'prefer:' + numad_ret
                    numa_mode_check(mode_nodeset)
                else:
                    mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
                    numa_mode_check(mode_nodeset)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        test.fail("cpu usage in node %s is not expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Exemplo n.º 7
0
def run(test, params, env):
    """
    Test interafce xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warn(
                    "Source device %s is not a interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Define VM succeed, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {
            "csum": "tx-checksumming",
            "tso4": "tcp-segmentation-offload",
            "tso6": "tx-tcp6-segmentation",
            "ecn": "tx-tcp-ecn-segmentation",
            "ufo": "udp-fragmentation-offload"
        }
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = process.run("ethtool -k %s | head -18" % if_name, shell=True)
            ret, output = out.exit_status, out.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in list(driver_options.keys()):
            if offload in offloads:
                if (output.count(offloads[offload]) and not output.count(
                        "%s: %s" %
                    (offloads[offload], driver_options[offload]))):
                    test.fail("offloads option %s: %s isn't"
                              " correct in ethtool output" %
                              (offloads[offload], driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Find %s=%s in vm xml" %
                             (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if target_dev.startswith("vnet") or \
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan"))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    test.fail("Failed to set target dev to %s", target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line
        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error(
                "Don't know which device driver to expect on qemu cmdline"
                " for iface_model %s" % iface_model)
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for IP address is ready
        utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac),
                            10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            logging.debug(
                "Duplicated IP address on guest. Check bug: "
                "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            if not vm_ip or vm_ip != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s", vm_ip, expect_ip)
                test.fail("Found wrong IP address" " on guest")
        # Check gateway address
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # Check dns server address
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        :param session: vm session
        :param add_session: additional vm session
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a backgroup job waiting for connection of client
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the backgroup job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Get iface_model. On s390x use default model 'virtio' if non-virtio given
        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model
        """
        if 's390x' == host_arch and 'virtio' not in iface_model:
            return "virtio"
        else:
            return iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check the vhostuser interface in guests

        param session1: Session of original guest
        param session2: Session of original additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])

        utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip)
        utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip)
        ping_status, ping_output = utils_net.ping(dest=guest2_ip,
                                                  count='3',
                                                  timeout=5,
                                                  session=session1)
        logging.info("output:%s" % ping_output)
        if ping_status != 0:
            if ping_expect_fail:
                logging.info("Can not ping guest2 as expected")
            else:
                test.fail("Can not ping guest2 from guest1")
        else:
            if ping_expect_fail:
                test.fail("Ping guest2 successfully not expected")
            else:
                logging.info("Can ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format in dict

        param ovs: openvswitch instance
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list",
                                        "interface"]).stdout_text.strip()
        ovs_iface_list = re.findall(
            'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info,
            re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                if iface_name == eval(ovs_iface[0]):
                    format_statis = dict(
                        re.findall(r'(\S*?)=(\d*?),', ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgement params for vhostuer test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Reserve selinux status
        selinux_mode = utils_selinux.get_status()
        # Reserve orig page size
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name,
                                            vhostuser_names, queue_size)

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach a interface when vm is shutoff
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking

                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell])
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml

                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                add_vm_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()

                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                # additional_vm.wait_for_login()
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach a interface when vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Need sleep here for attachment take effect
                time.sleep(5)

            # Update a interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(
                                r"RX:\s*%s" % driver_dict['rx_queue_size'],
                                outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(
                                r"TX:\s*%s" % driver_dict['tx_queue_size'],
                                outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                find = 0
                with open(libvirtd_log_path) as f:
                    lines = "".join(f.readlines())
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = {}
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s", vhost_name,
                              domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                ovs_cmp_dict = {
                    'tx_bytes': ovs_statis_dict['rx_bytes'],
                    'tx_drop': ovs_statis_dict['rx_dropped'],
                    'tx_errs': ovs_statis_dict['rx_errors'],
                    'tx_packets': ovs_statis_dict['rx_packets'],
                    'rx_bytes': ovs_statis_dict['tx_bytes'],
                    'rx_drop': ovs_statis_dict['tx_dropped']
                }
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail(
                            "Find ovs %s result (%s) different with domifstate result (%s)"
                            % (dict_key, ovs_cmp_dict[dict_key],
                               domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s is same with domifstate",
                                     dict_key, domif_stat_dict[dict_key])

            # Check multi_queue
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            "Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != queue_size or int(cur_comb) != int(
                                comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s, queue_size: %s"
                                % (comb_size, pre_comb, cur_comb, queue_size))
                        else:
                            logging.info(
                                "Getting correct Pre-set and Current set value"
                            )
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server process on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size,
                                    clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
Exemplo n.º 8
0
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "Forbid using relative path or file name only is added since libvirt-3.0.0"
        )

    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This is a specific bug verify, so ignore status_error here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disk such as 'cdrom'
        for disk in disks:
            if disk.device != 'disk':
                disks.remove(disk)
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs
              or 'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block'
                    or disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs
            # after test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth
                            in ['0B', '0M']) and not utl.check_blockjob(
                                vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --byte option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could be in stdout which fail the
                # check, so also do check in libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip())
                            or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                                "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recover VM may fail unexpectedly, we need using try/except to
        # proceed the following cleanup steps
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name, ignore_status=True).
                exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd ')
        except path.CmdNotFoundError:
            pass
Exemplo n.º 9
0
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        cpu_num = int(utils.run(cmd).stdout.strip().split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num+1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            raise error.TestFail("vcpuinfo cpu affinity not expected")

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num+1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        if vcpu_cpuset:
            pre_cpuset = utils_test.libvirt.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                raise error.TestNAError("cpuset %s out of range" %
                                        vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem/1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                raise error.TestFail("memory config %s not expected after "
                                     "domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    raise error.TestFail("cpuset not found in domain xml.")

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node,
            vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    raise error.TestFail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            left_node = [node_list.index(i) for i in node_list if i not in used_node]
            used_node = [node_list.index(i) for i in used_node]
            mem_compare(used_node, left_node)

        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                raise error.TestFail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = utils_test.libvirt.cpus_parser(numad_ret)
            left_node = [node_list.index(i) for i in node_list if i not in numad_node]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                mem_compare(numad_node_seq, left_node)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        raise error.TestFail("cpu usage in node %s is not "
                                             "expected" % i)
                cpu_affinity_check(node=numad_node)
Exemplo n.º 10
0
def run(test, params, env):
    """
    Test unix_sock_* parameter in libvird.conf.

    1) Change unix_sock_* in libvirtd.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if libvirtd socket file changed accordingly;
    """
    def mode_bits_to_str(bits):
        """
        Translate a integer returned by stat.S_IMODE() to 4-digit permission
        string.
        :param bits: A integer returned by stat.S_IMODE(), like "511".
        :return : Translated 4-digit permission string, like "0777".
        """
        ubit = bits % 8
        bits //= 8
        gbit = bits % 8
        bits //= 8
        obit = bits % 8
        bits //= 8
        return "%s%s%s%s" % (bits, obit, gbit, ubit)

    def check_unix_sock(group, perms, path, readonly=False):
        """
        Check the validity of one libvirt socket file, including existence,
        group name, access permission and usability of virsh command.

        :param group: Expected group of the file.
        :param perms: Expected permission string of the file.
        :param path: Absolute path of the target file.
        :return : True if success or False if any test fails.
        """
        mode = os.stat(path).st_mode
        gid = os.stat(path).st_gid

        # Check file exists as a socket file.
        if not stat.S_ISSOCK(mode):
            logging.error("File %s is not a socket file." % path)
            return False

        # Check file group ID.
        try:
            expected_gid = grp.getgrnam(group).gr_gid
            logging.debug('Group ID of %s is %s' % (group, expected_gid))
            if gid != expected_gid:
                logging.error('File group gid expected to be '
                              ' %s, but %s found' % (expected_gid, gid))
                return False
        except KeyError:
            logging.error('Can not find group "%s"' % group)
            return False

        # Check file permissions.
        mode_str = mode_bits_to_str(stat.S_IMODE(mode))
        logging.debug('Permission of file %s is %s' % (path, mode_str))
        # Zero padding perms to 4 digits.
        expected_perms = perms.zfill(4)
        if mode_str != expected_perms:
            logging.error('Expected file permission is %s, but %s '
                          'found' % (expected_perms, mode_str))
            return False

        # Check virsh connection.
        uri = 'qemu+unix:///system?socket=%s' % path

        # Prepare test XML file.
        net_name = "unix_sock_test"
        xml_cont = "<network><name>%s</name></network>" % net_name
        xml_path = os.path.join(data_dir.get_tmp_dir(), net_name + '.xml')
        with open(xml_path, 'w') as xml_file:
            xml_file.write(xml_cont)

        result = virsh.net_define(xml_path, uri=uri, ignore_status=True)
        logging.debug('Result of virsh test run is:\n %s' % result)
        try:
            if result.exit_status and not readonly:
                logging.error('Error encountered when running virsh net-define '
                              'on socket file %s' % path)
                return False
            elif readonly and not result.exit_status:
                logging.error('Expect fail when running virsh net-define on '
                              'read-only socket file %s, but succeeded.' % path)
                return False
        finally:
            # Cleanup network and temp file
            virsh.net_undefine(net_name, uri=uri, ignore_status=True)
            if os.path.exists(xml_path):
                os.remove(xml_path)

        # All success
        return True

    def check_all_unix_sock(group, ro_perms, rw_perms, root_path):
        """
        Check the validity of two libvirt socket files.

        :param group: Expected group of the files.
        :param ro_perms: Expected permission string of the read-only file.
        :param rw_perms: Expected permission string of the read-write file.
        :param root_path: Absolute path of the directory that target file in.
        :return : True if success or False if any test fails.
        """
        rw_path = os.path.join(root_path, 'libvirt-sock')
        logging.debug("Checking read-write socket file %s" % rw_path)
        if not check_unix_sock(group, rw_perms, rw_path):
            return False

        ro_path = os.path.join(root_path, 'libvirt-sock-ro')
        logging.debug("Checking read-only socket file %s" % ro_path)
        return check_unix_sock(group, ro_perms, ro_path, readonly=True)

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt_version.version_compare(5, 6, 0):
        ro_perms = '0666'
        rw_perms = '0666'
    else:
        ro_perms = params.get('unix_sock_ro_perms', '0777')
        rw_perms = params.get('unix_sock_rw_perms', '0777')
    path = params.get('unix_sock_dir', '/var/run/libvirt')
    expected_result = params.get('expected_result', 'success')
    # In Ubuntu there is no group called "nobody", "nogroup" instead.
    distro_details = distro.detect()
    if distro_details.name == 'Ubuntu':
        group = params.get('unix_sock_group', 'nogroup')
    else:
        group = params.get('unix_sock_group', 'nobody')
    try:
        # Change params in libvirtd.conf
        config.unix_sock_group = group
        config.unix_sock_ro_perms = ro_perms
        config.unix_sock_rw_perms = rw_perms
        config.unix_sock_dir = path
        # Restart libvirtd to make change valid.
        if path == '/var/run/libvirt':
            restarted = libvirtd.restart()
        # Using restart() in utils_libvirtd will try to connect daemon
        # with 'virsh list'. This will fail if socket file location
        # changed. We solve this by bypassing the checking part.
        else:
            restarted = libvirtd.libvirtd.restart()

        if not restarted:
            if expected_result != 'unbootable':
                raise exceptions.TestFail('Libvirtd is expected to be started.')
            return

        if expected_result == 'unbootable':
            raise exceptions.TestFail('Libvirtd is not expected to be started.')

        if check_all_unix_sock(group, ro_perms, rw_perms, path):
            if expected_result == 'fail':
                raise exceptions.TestFail('Expected fail, but check passed.')
        else:
            if expected_result == 'success':
                raise exceptions.TestFail('Expected success, but check failed.')
    finally:
        config.restore()
        libvirtd.restart()
        process.system("systemctl restart libvirtd.socket", ignore_status=True)
Exemplo n.º 11
0
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _blockcopy_cmd():
        """
        Run blockcopy command
        """
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)
        _blockjob_and_libvirtd_chk(cmd_result)
        if cmd_result.exit_status:
            return False
        elif "Copy aborted" in cmd_result.stdout:
            return False
        else:
            return cmd_result

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if disk_xml.source.attrs.has_key('file'):
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif (disk_xml.source.attrs.has_key('dev')
              or disk_xml.source.attrs.has_key('name')
              or disk_xml.source.attrs.has_key('pool')):
            if (disk_xml.type_name == 'block'
                    or disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if new_attrs.has_key('name'):
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif new_attrs.has_key('pool'):
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            utl.set_vm_disk(vm, params, tmp_dir, test)
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail")
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")
            cmd_result = utils_misc.wait_for(_blockcopy_cmd, 10)
            if not cmd_result:
                raise exceptions.TestFail("Run blockcopy command fail")
            status = 0
        else:
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                         **extra_dict)
            _blockjob_and_libvirtd_chk(cmd_result)
            status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target, "bandwidth",
                                              bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using the --bytes option
                val += options.count('--bytes')
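                # val > 0 means the copy job either ends on its own
                # (--pivot/--finish) or is deliberately not waited for
                # (--bytes), so the explicit wait is skipped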
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob --pivot in a subprocess as it will hang for
                    # a while; then run blockjob --info again to check the job
                    # state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stderr)
        else:
            # Negative case: the command is expected to fail (a minimal
            # sketch, assuming the standard status check used by similar
            # tests in this collection)
            if status == 0:
                raise exceptions.TestFail("Run successfully with wrong "
                                          "command!")
Example #12
def run(test, params, env):
    """
    Test numa memory migration with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage of used nodes should be greater than that of the
        remaining nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise error.TestNAError("At least 2 numa nodes are needed on host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
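    # e.g. memory_mode='strict', memory_nodeset='0' yields
    # numa_memory == {'mode': 'strict', 'nodeset': '0'} (illustrative values)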

    # Prepare libvirtd session with log level as 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        current_mem = vmxml.current_mem
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise error.TestFail("Test failed in positive case.\n error: %s" %
                                 e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # Run numatune to change the numa memory config live
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise error.TestFail("numa memory config %s not expected after"
                                     " live update" % numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them to
            # list indexes so that they can be used in mem_compare
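            # e.g. with node_list == [0, 2, 3] and node == 2, left_node_new
            # becomes [0, 2] (the indexes of nodes 0 and 3)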
            left_node_new = [
                node_list.index(i) for i in node_list if i != node
            ]
            used_node = [node_list.index(node)]

            mem_compare(used_node, left_node_new)
Example #13
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(),
                                         "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Keep guest memory small so that dumping core completes quickly
        # enough not to time out the test case
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
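                # The auto-dump core file name starts with the (possibly
                # truncated) domain name, so match it with a glob pattern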
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Cancel this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # prior kill action completes
        if vm_action == "kill":
            utils_misc.wait_for(vm.is_dead, timeout=20)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref,
                                extra,
                                ignore_status=True,
                                debug=True,
                                uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed.")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the suspended vm now, otherwise teardown
                    # would take a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # The VM will be in preserved state; perform virsh reset
                    # and check that the VM reboots and domstate moves from
                    # crashed to running, as a bug was observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists" %
                                      libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Found error message %s in log file: %s." %
                                (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
Example #14
def run(test, params, env):
    """
    Test host_uuid parameter in libvirtd.conf.

    1) Change host_uuid in libvirtd.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check current host UUID by `virsh capabilities`;
    """
    def get_dmi_uuid():
        """
        Retrieve the UUID of DMI, which is usually used as libvirt daemon
        host UUID.

        :return: DMI UUID if it can be located, or None if it can't.
        """
        uuid_paths = [
            '/sys/devices/virtual/dmi/id/product_uuid',
            '/sys/class/dmi/id/product_uuid',
        ]
        for path in uuid_paths:
            if os.path.isfile(path):
                dmi_fp = open(path)
                try:
                    uuid = dmi_fp.readline().strip().lower()
                    return uuid
                finally:
                    dmi_fp.close()

    uuid_type = params.get("uuid_type", "lowercase")
    expected_result = params.get("expected_result", "success")
    new_uuid = params.get("new_uuid", "")

    # We are expected to get a standard UUID format on success.
    if expected_result == 'success':
        expected_uuid = str(uuid.UUID(new_uuid))

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        orig_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Original host UUID is %s' % orig_uuid)

        if uuid_type == 'not_set':
            # Remove `host_uuid` in libvirtd.conf.
            del config.host_uuid
        elif uuid_type == 'unterminated':
            # Change `host_uuid` in libvirtd.conf.
            config.set_raw('host_uuid', '"%s' % new_uuid)
        elif uuid_type == 'unquoted':
            config.set_raw('host_uuid', new_uuid)
        elif uuid_type == 'single_quoted':
            config.set_raw('host_uuid', "'%s'" % new_uuid)
        else:
            config.host_uuid = new_uuid

        # Restart libvirtd to make the change take effect. May raise
        # ConfigError if it does not succeed.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started '
                                     'with host_uuid = %s' % config['host_uuid'])
            return

        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started '
                                 'with host_uuid = %s' % config['host_uuid'])

        cur_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Current host UUID is %s' % cur_uuid)

        if expected_result == 'success':
            if cur_uuid != expected_uuid:
                raise error.TestFail(
                    "Host UUID didn't change as expected"
                    " from %s to %s, but is %s" % (orig_uuid, expected_uuid,
                                                   cur_uuid))
        # libvirtd should use system DMI UUID for all_digit_same or
        # not_set host_uuid.
        elif expected_result == 'dmi_uuid':
            dmi_uuid = get_dmi_uuid()
            logging.debug("DMI UUID is %s." % dmi_uuid)

            if dmi_uuid is not None and cur_uuid != dmi_uuid:
                raise error.TestFail(
                    "Host UUID didn't change from "
                    "%s to DMI UUID %s as expected, but is %s" % (
                        orig_uuid, dmi_uuid, cur_uuid))
    finally:
        config.restore()
        if not libvirtd.is_running():
            libvirtd.start()
Example #15
def run(test, params, env):
    """
    Start libvirt daemon with different options.
    Check socket files.
    """
    log = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        log.append(line)

    def check_help(params):
        """
        Check whether the output is help and meets expectation
        """
        expected_help = params.get('expected_help', 'no') == 'yes'
        is_help = any(line.startswith('Usage:') for line in log)
        if expected_help != is_help:
            raise error.TestFail(
                'Expected output help is %s, but got output:\n%s' %
                (expected_help, '\n'.join(log)))

    def check_version(params):
        """
        Check whether the output is libvirtd version.
        """
        expected_version = params.get('expected_version', 'no') == 'yes'
        is_version = log[0].startswith('libvirtd (libvirt)')
        if expected_version != is_version:
            raise error.TestFail(
                'Expected output version is %s, but got output:\n%s' %
                (expected_version, '\n'.join(log)))

    def check_unix_socket_files():
        """
        Check whether the socket file exists.
        """
        rw_sock_path = '/var/run/libvirt/libvirt-sock'
        ro_sock_path = '/var/run/libvirt/libvirt-sock-ro'

        if libvirtd.running:
            if not os.path.exists(rw_sock_path):
                raise error.TestFail('RW unix socket file not found at %s' %
                                     rw_sock_path)
            if not os.path.exists(ro_sock_path):
                raise error.TestFail('RO unix socket file not found at %s' %
                                     ro_sock_path)
        else:
            if os.path.exists(rw_sock_path) or os.path.exists(ro_sock_path):
                raise error.TestFail('Expected unix socket files not to '
                                     'exist when libvirtd is stopped')

    def check_pid_file():
        """
        Check whether the pid file exists.
        """
        if not os.path.exists(pid_path):
            raise error.TestFail("PID file not found at %s" % pid_path)

        pid_file = open(pid_path)
        pid = int(pid_file.readline())
        pid_file.close()

        result = utils.run('pgrep libvirtd', ignore_status=True)
        expected_pid = int(result.stdout.split()[0])

        if pid != expected_pid:
            raise error.TestFail("PID file content mismatch. Expected %s "
                                 "but got %s" % (expected_pid, pid))

    def check_config_file():
        """
        Check whether the config file takes effect by checking the host UUID.
        """
        cur_uuid = capability_xml.CapabilityXML()['uuid']
        if cur_uuid != check_uuid:
            raise error.TestFail('Expected host UUID is %s, but got %s' %
                                 (check_uuid, cur_uuid))

    MAX_TIMEOUT = 10
    arg_str = params.get("libvirtd_arg", "")
    time_tolerance = float(params.get("exit_time_tolerance", 1))
    expected_exit_time = float(params.get("expected_exit_time", 'inf'))
    config_path = params.get('expected_config_path', "")
    pid_path = params.get('expected_pid_path', "")

    if expected_exit_time == float('inf'):
        timeout = MAX_TIMEOUT
    else:
        if expected_exit_time > 0:
            if len(virsh.dom_list('--name').stdout.strip().splitlines()):
                raise error.TestNAError('Timeout option will be ignored if '
                                        'there are living domains')
        timeout = expected_exit_time + time_tolerance

    libvirtd = LibvirtdSession(logging_handler=_logger)

    # Setup config file.
    check_uuid = '13371337-1337-1337-1337-133713371337'
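    # Any recognizable UUID works; check_config_file() later compares the
    # capabilities UUID against it to prove the config file took effect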
    if config_path:
        open(config_path, 'a').close()
        config = utils_config.LibvirtdConfig(config_path)
        config.host_uuid = check_uuid

    try:
        check_unix_socket_files()

        libvirtd.start(arg_str=arg_str, wait_for_working=False)

        start = time.time()
        libvirtd_exited = libvirtd.wait_for_stop(timeout=timeout, step=0.1)
        wait_time = time.time() - start

        if log:
            logging.debug("Libvirtd log:")
            for line in log:
                logging.debug(line)

            check_help(params)
            check_version(params)

        if libvirtd_exited:
            if expected_exit_time == float('inf'):
                raise error.TestFail("Expected never stop, but ran %ss" %
                                     wait_time)
            elif wait_time < expected_exit_time - time_tolerance:
                raise error.TestFail(
                    "Expected exit in %ss(+-%ss), but exited after %ss" %
                    (expected_exit_time, time_tolerance, wait_time))
        else:
            if expected_exit_time != float('inf'):
                raise error.TestFail(
                    "Expected exit in %ss(+-%ss), but still running after "
                    "%ss" % (expected_exit_time, time_tolerance, wait_time))

        check_unix_socket_files()
        if config_path:
            check_config_file()
        if pid_path:
            check_pid_file()
    finally:
        libvirtd.exit()

        # Clean up config file
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if os.path.exists(pid_path):
            os.remove(pid_path)
Example #16
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo is usable, because qemu-guest-agent needs to
    be installed in the guest

    The command creates a snapshot (disk and RAM) from arguments covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the memspec parameter comes without "file=", it is a bare file
        # name, so just prepend the test dir to it.
        if mem_options is None:
            mem_options = os.path.join(test.tmpdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
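    # Parse the option string into a dict keyed by option name, e.g. (with
    # illustrative values) "--diskspec vda,file=/tmp/a --name s1" becomes
    # {'diskspec': 'vda,file=/tmp/a', 'name': 's1'}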
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.tmpdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(test.tmpdir,
                                         params.get(external_disk))
                utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
        # Only chmod the last external disk for the negative case
        if dac_denial:
            utils.run("chmod 500 %s" % disk_path)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            session = vm.wait_for_login()

            # Make sure qemu-guest-agent is installed in the guest
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestNAError("Fail to install qemu-guest-agent, "
                                        "make sure that you have usable repo "
                                        "in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                if start_ga == "yes":
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestNAError("Fail to run qemu-ga in guest")
            else:
                if start_ga == "no":
                    # The qemu-ga could be running and should be killed
                    session.cmd("kill -9 `pidof qemu-ga`")
                    # Check if the qemu-ga get killed
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        # As managed by systemd and set as autostart, qemu-ga
                        # could be restarted, so use systemctl to stop it.
                        session.cmd("systemctl stop qemu-guest-agent")
                        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                        if not stat_ps:
                            raise error.TestNAError("Fail to stop agent in "
                                                    "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot when multiple disks are
        # specified in cfg and --print-xml is not used
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % string.ascii_lowercase[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong command!")
                else:
                    # The memspec file should have been removed on failure
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check the domain xml is not updated if reuse external fails
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            raise error.TestFail("Domain xml should not be "
                                                 "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    raise error.TestFail("'%s' was found: %s"
                                                         % (pattern, line))

    finally:
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # Remove attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                external_disk = "external_disk%s" % i
                disk_path = os.path.join(test.tmpdir, params.get(external_disk))
                if os.path.exists(disk_path):
                    os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
Example #17
def run(test, params, env):
    """
    Test libvirtd_config parameter in /etc/sysconfig/libvirtd.

    1) Change libvirtd_config in sysconfig;
    2) Change host_uuid in newly defined libvirtd.conf file;
    3) Restart libvirt daemon;
    4) Check if libvirtd successfully started;
    5) Check if host_uuid updated accordingly;
    """
    def get_init_name():
        """
        Internal function to determine which executable is PID 1.
        :return: executable name for PID 1, aka init
        """
        fp = open('/proc/1/comm')
        name = fp.read().strip()
        fp.close()
        return name

    libvirtd_config = params.get('libvirtd_config', 'not_set')
    expected_result = params.get('expected_result', 'success')

    if get_init_name() == 'systemd':
        logging.info('Init process is systemd, '
                     'LIBVIRTD_CONFIG should not work.')
        expected_result = 'unchange'

    sysconfig = utils_config.LibvirtdSysConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    config_path = ""
    check_uuid = '13371337-1337-1337-1337-133713371337'
    try:
        if libvirtd_config == 'not_set':
            del sysconfig.LIBVIRTD_CONFIG
        elif libvirtd_config == 'exist_file':
            config_path = os.path.join(data_dir.get_tmp_dir(), 'test.conf')
            open(config_path, 'a').close()

            config = utils_config.LibvirtdConfig(config_path)
            config.host_uuid = check_uuid

            sysconfig.LIBVIRTD_CONFIG = config_path
        else:
            sysconfig.LIBVIRTD_CONFIG = libvirtd_config

        if not libvirtd.restart():
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started '
                                     'with LIBVIRTD_CONFIG = '
                                     '%s' % sysconfig.LIBVIRTD_CONFIG)
        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started '
                                 'with LIBVIRTD_CONFIG = '
                                 '%s' % sysconfig.LIBVIRTD_CONFIG)
        cur_uuid = capability_xml.CapabilityXML()['uuid']
        if cur_uuid == check_uuid:
            if expected_result == 'unchange':
                raise error.TestFail('Expected host UUID to stay unchanged, '
                                     'but got %s' % cur_uuid)
        else:
            if expected_result == 'change':
                raise error.TestFail('Expected host UUID is %s, but got %s' %
                                     (check_uuid, cur_uuid))

    finally:
        if libvirtd_config == 'exist_file':
            config.restore()
            if os.path.isfile(config_path):
                os.remove(config_path)
        sysconfig.restore()
        libvirtd.restart()
Example #18
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            open(config_path, 'a').close()
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
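        # Each entry ends up like {'id': '0', 'enabled': 'yes',
        # 'hotpluggable': 'no', 'order': '1'} (illustrative values)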
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception:
            # The domain may lack a <cpu>/<topology> element; ignore it
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not cpu.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip(
                    '\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Exemplo n.º 19
0
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    with open(config_path, 'a') as f:
        pass
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd start as session use root, need stop virtlogd service
        # and start it as daemon to fix selinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop",
                        ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # get left used node beside current using
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            # memory_status is a total numa list. node_list could not
            # match the count of nodes
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i) for i in total_online_node_list
                if i != node
            ]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True,
                        shell=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Exemplo n.º 20
0
def run(test, params, env):
    """
    Test unix_sock_* parameter in libvird.conf.

    1) Change unix_sock_* in libvirtd.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check if libvirtd socket file changed accordingly;
    """
    def mode_bits_to_str(bits):
        """
        Translate a integer returned by stat.S_IMODE() to 4-digit permission
        string.
        :param bits: A integer returned by stat.S_IMODE(), like "511".
        :return : Translated 4-digit permission string, like "0777".
        """
        ubit = bits % 8
        bits /= 8
        gbit = bits % 8
        bits /= 8
        obit = bits % 8
        bits /= 8
        return "%s%s%s%s" % (bits, obit, gbit, ubit)

    def check_unix_sock(group, perms, path):
        """
        Check the validity of one libvirt socket file, including existance,
        group name, access permission and usability of virsh command.

        :param group: Expected group of the file.
        :param perms: Expected permission string of the file.
        :param path: Absolute path of the target file.
        :return : True if success or False if any test fails.
        """
        mode = os.stat(path).st_mode
        gid = os.stat(path).st_gid

        # Check file exists as a socket file.
        if not stat.S_ISSOCK(mode):
            logging.error("File %s is not a socket file." % path)
            return False

        # Check file group ID.
        try:
            expected_gid = grp.getgrnam(group).gr_gid
            logging.debug('Group ID of %s is %s' % (group, expected_gid))
            if gid != expected_gid:
                logging.error('File group gid expected to be '
                              ' %s, but %s found' % (expected_gid, gid))
                return False
        except KeyError:
            logging.error('Can not find group "%s"' % group)
            return False

        # Check file permissions.
        mode_str = mode_bits_to_str(stat.S_IMODE(mode))
        logging.debug('Permission of file %s is %s' % (path, mode_str))
        # Zero padding perms to 4 digits.
        expected_perms = perms.zfill(4)
        if mode_str != expected_perms:
            logging.error('Expected file permission is %s, but %s '
                          'found' % (expected_perms, mode_str))
            return False

        # Check virsh connection.
        uri = 'qemu+unix:///system?socket=%s' % path
        result = virsh.dom_list('--all', uri=uri)
        logging.debug('Result of virsh test run is:\n %s' % result)
        if result.exit_status:
            logging.error('Error encountered when running virsh list on '
                          'socket file %s' % path)
            return False

        # All success
        return True

    def check_all_unix_sock(group, ro_perms, rw_perms, root_path):
        """
        Check the validity of two libvirt socket files.

        :param group: Expected group of the files.
        :param ro_perms: Expected permission string of the read-only file.
        :param rw_perms: Expected permission string of the read-write file.
        :param root_path: Absolute path of the directory that target file in.
        :return : True if success or False if any test fails.
        """
        rw_path = os.path.join(root_path, 'libvirt-sock')
        logging.debug("Checking read-write socket file %s" % rw_path)
        if not check_unix_sock(group, rw_perms, rw_path):
            return False

        ro_path = os.path.join(root_path, 'libvirt-sock-ro')
        logging.debug("Checking read-only socket file %s" % ro_path)
        return check_unix_sock(group, ro_perms, ro_path)

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    group = params.get('unix_sock_group', 'root')
    ro_perms = params.get('unix_sock_ro_perms', '0777')
    rw_perms = params.get('unix_sock_rw_perms', '0777')
    path = params.get('unix_sock_dir', '/var/run/libvirt')
    expected_result = params.get('expected_result', 'success')
    try:
        # Change params in libvirtd.conf
        config.unix_sock_group = group
        config.unix_sock_ro_perms = ro_perms
        config.unix_sock_rw_perms = rw_perms
        config.unix_sock_dir = path

        # Restart libvirtd to make change valid.
        if path == '/var/run/libvirt':
            restarted = libvirtd.restart()
        # Using restart() in utils_libvirtd will try to connect daemon
        # with 'virsh list'. This will fail if socket file location
        # changed. We solve this by bypassing the checking part.
        else:
            restarted = libvirtd.libvirtd.restart()

        if not restarted:
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started.')
            return

        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started.')

        if check_all_unix_sock(group, ro_perms, rw_perms, path):
            if expected_result == 'fail':
                raise error.TestFail('Expected fail, but check passed.')
        else:
            if expected_result == 'success':
                raise error.TestFail('Expected success, but check failed.')
    finally:
        config.restore()
        libvirtd.restart()
Exemplo n.º 21
0
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need to
    be installed in guest

    The command create a snapshot (disk and RAM) from arguments which including
    the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in " "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disk before create snapshot if not print xml and multi disks
        # specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
                virsh.attach_disk(vm_name,
                                  disk_path,
                                  'vd%s' % list(string.ascii_lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name,
                                                   options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name,
                                                      options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # for multi snapshots without specific snapshot name, the
                # snapshot name is using time string with 1 second
                # incremental, to avoid get snapshot failure with same name,
                # sleep 1 second here.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exist" %
                                      option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # For cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s" %
                                              (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attach disks and reuse external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Test virt-admin  server-clients-set
    2) Change max_clients to a new value;
    3) get the current clients info;
    4) check whether the clients info is correct;
    5) try to connect other client onto the server;
    6) check whether the above connection status is correct.
    """

    server_name = params.get("server_name")
    is_positive = params.get("is_positive") == "yes"
    options_ref = params.get("options_ref")
    nclients_max = params.get("nclients_maxi")
    nclients = params.get("nclients")
    nclients_unauth_max = params.get("nclients_unauth_maxi")
    connect_able = params.get("connect_able")
    options_test_together = params.get("options_test_together")

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    vp = virt_admin.VirtadminPersistent()
    virsh_instance = []

    def clients_info(server):
        """
        check the attributes by server-clients-set.
        1) get the output  returned by server-clients-set;
        2) split the output to get a dictionary of those attributes;
        :params server: print the info of the clients connecting to this server
        :return: a dict obtained by transforming the result_info
        """
        result_info = vp.srv_clients_info(server,
                                          ignore_status=True,
                                          debug=True)
        out = result_info.stdout.strip().splitlines()
        out_split = [item.split(':') for item in out]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])
        return out_dict

    def chk_connect_to_libvirtd(connect_able):
        try:
            virsh_instance.append(virsh.VirshPersistent(uri='qemu:///system'))
        except Exception as info:
            if connect_able == "yes":
                test.fail(
                    "Connection to libvirtd is not success, error:\n %s" %
                    info)
            else:
                logging.info("Connections to libvirtd should not success, "
                             "this is a correct test result!")
        else:
            if connect_able == "yes":
                logging.info("Connections to libvirtd is successful, "
                             "this is a correct test result!")
            else:
                test.fail("error: Connection to libvirtd should not success! "
                          "Check the attributes.")

    try:
        if options_ref:
            if "max-clients" in options_ref:
                if nclients:
                    if int(nclients_max) > int(nclients):
                        config.max_clients = nclients
                        config.max_anonymous_clients = nclients_unauth_max
                        libvirtd.restart()
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(uri='qemu:///system'))
                        result = vp.srv_clients_set(server_name,
                                                    max_clients=nclients_max,
                                                    ignore_status=True,
                                                    debug=True)
                    elif int(nclients_max) <= int(nclients):
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(uri='qemu:///system'))
                        result = vp.srv_clients_set(
                            server_name,
                            max_clients=nclients_max,
                            max_unauth_clients=nclients_unauth_max,
                            ignore_status=True,
                            debug=True)

                else:
                    result = vp.srv_clients_set(server_name,
                                                max_clients=nclients_max,
                                                ignore_status=True,
                                                debug=True)
            elif "max-unauth-clients" in options_ref:
                result = vp.srv_clients_set(
                    server_name,
                    max_unauth_clients=nclients_unauth_max,
                    ignore_status=True,
                    debug=True)
        elif options_test_together:
            result = vp.srv_clients_set(server_name,
                                        max_clients=nclients_max,
                                        max_unauth_clients=nclients_unauth_max,
                                        ignore_status=True,
                                        debug=True)

        outdict = clients_info(server_name)

        if result.exit_status:
            if is_positive:
                test.fail("This operation should success "
                          "but failed! output:\n%s " % result)
            else:
                logging.debug("This failure is expected!")
        else:
            if is_positive:
                if options_ref:
                    if "max-clients" in options_ref:
                        if outdict["nclients_max"] != nclients_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                        if nclients:
                            chk_connect_to_libvirtd(connect_able)
                    elif "max_unauth_clients" in options_ref:
                        if outdict[
                                "nclients_unauth_max"] != nclients_unauth_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                elif options_test_together:
                    if (outdict["nclients_max"] != nclients_max
                            or outdict["nclients_unauth_max"] !=
                            nclients_unauth_max):
                        test.fail("attributes set by server-clients-set "
                                  "is not correct!")
            else:
                test.fail("This is a negative case, should get failure.")
    finally:
        for session in virsh_instance:
            session.close_session()
        config.restore()
        libvirtd.restart()