Code example #1
def run(test, params, env):
    """
    Test numa memory migration with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in used nodes should be greater than in the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise error.TestNAError("At least 2 numa nodes are needed on host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare a libvirtd session with log level 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        current_mem = vmxml.current_mem
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise error.TestFail("Test failed in positive case.\n error: %s" %
                                 e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # Run numatune to change the numa memory config live
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise error.TestFail("numa memory config %s not expected after"
                                     " live update" % numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # to sequence indexes so they can be used in mem_compare
            left_node_new = [
                node_list.index(i) for i in node_list if i != node
            ]
            used_node = [node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        # Cleanup restored from the fuller revision in code example #2
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
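
The index conversion above matters because memory_status is ordered by list
position, not by node number. A minimal standalone sketch of the mem_compare
logic, using hypothetical values:

# Hypothetical host with non-contiguous online nodes 0 and 8.
node_list = [0, 8]
memory_status = ['4096', '819200']  # qemu memory (KB) per node, by position
used_node = [node_list.index(8)]    # node number 8 -> positional index 1
left_node = [node_list.index(n) for n in node_list if n != 8]  # -> [0]
used_total = sum(int(memory_status[i]) for i in used_node)
left_total = sum(int(memory_status[i]) for i in left_node)
print(used_total > left_total)      # True: memory sits on the used node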
Code example #2
def run(test, params, env):
    """
    Test numa memory migration with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in used nodes should be greater than in the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare a libvirtd session with log level 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, stop the virtlogd
        # service and start it as a daemon to avoid an SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop",
                        ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # Run numatune to change the numa memory config live
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # to sequence indexes so they can be used in mem_compare.
            # memory_status covers every online numa node, so node_list
            # may not match its length
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i) for i in total_online_node_list
                if i != node
            ]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True,
                        shell=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
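
Both revisions rely on LibvirtdSession tailing the daemon output and calling
logging_handler for every line that matches logging_pattern (example #6 below
also passes extra arguments via logging_params). A toy sketch of that dispatch
idea, assuming the real session does roughly this against a live log:

import re

def dispatch_log_lines(lines, pattern, handler, handler_params=()):
    # Call handler(line, *handler_params) for each line matching pattern.
    regex = re.compile(pattern)
    for line in lines:
        if regex.match(line):
            handler(line, *handler_params)

numad_log = []
fake_log = ["info : libvirtd starting", "debug : numad: 0-1"]  # made-up lines
dispatch_log_lines(fake_log, r".*numad", lambda line: numad_log.append(line))
print(numad_log)  # ['debug : numad: 0-1']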
Code example #3
File: numa_memory.py Project: smitterl/tp-libvirt
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in used nodes should be greater than in the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            test.fail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format a cpu affinity string

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        ret = process.run(cmd, shell=True)
        cpu_num = int(ret.stdout_text.split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num + 1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            test.fail("vcpuinfo cpu affinity not expected")

    def numa_mode_check(mode_nodeset):
        """
        When the mode is 'preferred' or 'interleave', check the memory
        policy recorded in /proc/<pid>/numa_maps.
        """
        vm_pid = vm.get_pid()
        numa_map = '/proc/%s/numa_maps' % vm_pid
        # Each numa_maps line reads "<address> <policy> <attrs...>", so
        # the second field holds the memory policy, e.g. "prefer:1"
        with open(numa_map) as file:
            for line in file.readlines():
                if line.split()[1] != mode_nodeset:
                    test.fail("numa node and nodeset %s is "
                              "not expected" % mode_nodeset)

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[nnode])]) + ','
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    # Prepare a libvirtd session with log level 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, stop the virtlogd
        # service and start it as a daemon to avoid an SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True)
            process.run("virtlogd -d")
        except path.CmdNotFoundError:
            pass

        # Allow more time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = node_list[:nodes_size]

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose random cpus from the available CPUs on the
                    # system, making sure no cpu is added twice so the
                    # selection reaches the requested length
                    for i in range(
                            len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list),
                                                      max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join(
                        [str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If the host has non-contiguous node numbers, convert them
            # to sequence indexes so they can be used in mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [
                    node_list.index(i) for i in node_list if i not in used_node
                ]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(used_node, left_node)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get(
                    'nodeset')
                numa_mode_check(mode_nodeset)
        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                test.fail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                logging.warning('numa log:\n%s\n' %
                                numad_log[0].split("numad ")[-1])
                logging.warning('numa cmd opt:\n%s\n' % numad_cmd_opt)
                test.fail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = cpu.cpus_parser(numad_ret)
            left_node = [
                node_list.index(i) for i in node_list if i not in numad_node
            ]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                if numa_memory.get('mode') == 'strict':
                    mem_compare(numad_node_seq, left_node)
                elif numa_memory.get('mode') == 'preferred':
                    mode_nodeset = 'prefer:' + numad_ret
                    numa_mode_check(mode_nodeset)
                else:
                    mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
                    numa_mode_check(mode_nodeset)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        test.fail("cpu usage in node %s is not expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
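
The affinity string that format_affinity_str builds (and cpu_affinity_check
compares against the last column of virsh vcpuinfo) has one character per host
CPU: 'y' where the vCPU may run, '-' elsewhere. A self-contained version, with
the lscpu call replaced by a fixed CPU count for illustration:

def affinity_mask(cpu_list, cpu_num):
    # 'y' marks CPUs the vCPU may run on, '-' marks all others.
    return ''.join('y' if i in cpu_list else '-' for i in range(cpu_num))

print(affinity_mask([0, 2, 3], 6))  # 'y-yy--'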
Code example #4
def run(test, params, env):
    """
    Test starting a domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    bug_url = params.get("bug_url", "")
    vm_name = params.get("main_vm")

    if not libvirt_version.version_compare(1, 2, 6):
        raise error.TestNAError("Bug %s not fixed on current build" % bug_url)

    vm = env.get_vm(vm_name)
    # Prepare vm filterref parameters dict list
    filterref_dict = {}
    filterref_dict['name'] = filter_name

    # backup vm and filter xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_filter = libvirt_xml.NwfilterXML()
    filterxml = backup_filter.new_from_filter_dumpxml(filter_name)
    libvirtd = utils_libvirtd.LibvirtdSession()

    def nwfilter_sync_loop(filter_name, filterxml):
        """
        Undefine and redefine the filter from xml in a loop
        """
        for i in range(2400):
            virsh.nwfilter_undefine(filter_name, ignore_status=True)
            time.sleep(0.1)
            virsh.nwfilter_define(filterxml.xml, ignore_status=True)

    def vm_start_destroy_loop(vm):
        """
        Start and destroy the vm in a loop
        """
        for i in range(2400):
            vm.start()
            time.sleep(0.1)
            vm.destroy(gracefully=False)

    try:
        libvirtd.start()
        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        filter_thread = threading.Thread(target=nwfilter_sync_loop,
                                         args=(filter_name, filterxml))
        vm_thread = threading.Thread(target=vm_start_destroy_loop,
                                     args=(vm,))
        filter_thread.start()
        time.sleep(0.3)
        vm_thread.start()

        ret = utils_misc.wait_for(lambda: not libvirtd.is_working(),
                                  timeout=240,
                                  step=1)

        filter_thread.join()
        vm_thread.join()
        if ret:
            raise error.TestFail("Libvirtd hang, %s" % bug_url)

    finally:
        libvirtd.exit()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm and filter.
        vmxml_backup.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        virsh.nwfilter_define(filterxml.xml, ignore_status=True)
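
The deadlock check hinges on utils_misc.wait_for, which polls a callable until
it returns a truthy value or the timeout expires; a truthy result here means
libvirtd stopped responding. A minimal stand-in with the same contract (the
step argument is assumed to be the poll interval in seconds):

import time

def wait_for(func, timeout, step=1.0):
    # Poll func() until it returns a truthy value or timeout (in seconds)
    # elapses; return that value, or None on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None

# Usage mirroring the test:
#   wait_for(lambda: not libvirtd.is_working(), timeout=240, step=1)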
Code example #5
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in used nodes should be greater than in the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format a cpu affinity string

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        cpu_num = int(utils.run(cmd).stdout.strip().split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num+1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            raise error.TestFail("vcpuinfo cpu affinity not expected")

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare a libvirtd session with log level 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num+1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        if vcpu_cpuset:
            pre_cpuset = utils_test.libvirt.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                raise error.TestNAError("cpuset %s out of range" %
                                        vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                raise error.TestFail("memory config %s not expected after "
                                     "domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    raise error.TestFail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node,
            vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    raise error.TestFail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If the host has non-contiguous node numbers, convert them
            # to sequence indexes so they can be used in mem_compare
            left_node = [node_list.index(i) for i in node_list if i not in used_node]
            used_node = [node_list.index(i) for i in used_node]
            mem_compare(used_node, left_node)

        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                raise error.TestFail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = utils_test.libvirt.cpus_parser(numad_ret)
            left_node = [node_list.index(i) for i in node_list if i not in numad_node]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                mem_compare(numad_node_seq, left_node)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        raise error.TestFail("cpu usage in node %s is not "
                                             "expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        # Cleanup restored from the fuller revision in code example #3
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
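
For reference, the numad advisory request that is checked against numad_log[0]
has the form "-w <vcpus>:<MB>"; max_mem comes from the domain XML in KiB,
hence the division by 1024:

vcpu_num = 4
max_mem = 2097152                                  # KiB, from the domain XML
print("-w %s:%s" % (vcpu_num, max_mem // 1024))    # '-w 4:2048'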
Code example #6
File: libvirtd_start.py Project: smitterl/tp-libvirt
def run(test, params, env):
    """
    This case checks error messages in the libvirtd log.

    Implemented test cases:
    with_iptables:  Start libvirtd when using iptables service as firewall.
    with_firewalld: Start libvirtd when using firewalld service as firewall.
    no_firewall:    Start libvirtd with both firewall services shut off.
    """
    def _error_handler(line, errors):
        """
        A callback function called when a new error line appears in the
        libvirtd log; the line is appended to the list 'errors'

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    def _check_errors():
        """
        Check for unexpected error messages in libvirtd log.
        """
        logging.info('Checking errors in libvirtd log')
        accepted_error_patterns = [
            'Cannot access storage file',
            'Failed to autostart storage pool',
            'cannot open directory',
        ]

        if (not iptables_service and not firewalld_service
                and 'virt_t' not in libvirt_context):
            logging.info("virt_t is not in libvirtd process context. "
                         "Failures for setting iptables rules will be ignored")
            # A libvirtd process started without virt_t will fail to set
            # iptables rules, which is expected here
            accepted_error_patterns.append(
                '/sbin/iptables .* unexpected exit status 1')

        logging.debug("Accepted errors are: %s", accepted_error_patterns)

        if errors:
            logging.debug("Found errors in libvirt log:")
            for line in errors:
                logging.debug(line)

            unexpected_errors = []
            for line in errors:
                if any([re.search(p, line) for p in accepted_error_patterns]):
                    logging.debug('Error "%s" is acceptable', line)
                else:
                    unexpected_errors.append(line)
            if unexpected_errors:
                raise exceptions.TestFail(
                    "Found unexpected errors in libvirt log:\n%s" %
                    '\n'.join(unexpected_errors))

    iptables_service = params.get('iptables_service', 'off') == 'on'
    firewalld_service = params.get('firewalld_service', 'off') == 'on'

    # In RHEL7 the iptables service is provided by a separate package.
    # In RHEL6, iptables-services and firewalld are not supported.
    # So try to install all required packages, but ignore failures.
    logging.info('Preparing firewall related packages')
    software_mgr = software_manager.SoftwareManager()
    for pkg in ['iptables', 'iptables-services', 'firewalld']:
        if not software_mgr.check_installed(pkg):
            software_mgr.install(pkg)

    # Backup services status
    service_mgr = service.ServiceManager()
    logging.info('Backing up firewall services status')
    backup_iptables_status = service_mgr.status('iptables')
    backup_firewalld_status = service_mgr.status('firewalld')

    # The iptables service is deprecated in newer distros
    if iptables_service and backup_iptables_status is None:
        raise exceptions.TestSkipError('iptables service not found')
    # The firewalld service may not exist on some distros
    if firewalld_service and backup_firewalld_status is None:
        raise exceptions.TestSkipError('firewalld service not found')
    try:
        if iptables_service and firewalld_service:
            raise exceptions.TestError(
                'iptables service and firewalld service can not be started at '
                'the same time')

        # We should stop the running service first and start the other
        # afterwards; starting one service directly forces the other to
        # stop, which is not easy to handle.
        # The backup status is compared with None to make sure the
        # service exists before taking any action.
        logging.info('Changing firewall services status')
        if not iptables_service and backup_iptables_status is not None:
            process.run('iptables-save > /tmp/iptables.save', shell=True)
            service_mgr.stop('iptables')
        if not firewalld_service and backup_firewalld_status is not None:
            service_mgr.stop('firewalld')

        if iptables_service and backup_iptables_status is not None:
            service_mgr.start('iptables')
        if firewalld_service and backup_firewalld_status is not None:
            service_mgr.start('firewalld')
        errors = []
        # Run libvirt session and collect errors in log.
        libvirtd_session = utils_libvirtd.LibvirtdSession(
            service_name="virtnetworkd",
            logging_handler=_error_handler,
            logging_params=(errors, ),
            logging_pattern=r'[-\d]+ [.:+\d]+ [:\d]+ error :',
        )
        try:
            logging.info('Starting libvirtd session')
            libvirtd_session.start()
            time.sleep(3)

            libvirt_pid = libvirtd_session.tail.get_pid()
            sestatus = utils_selinux.get_status()
            if sestatus == "disabled":
                raise exceptions.TestSkipError("SELinux is in Disabled mode."
                                               "It must be in enforcing mode "
                                               "for test execution")
            libvirt_context = utils_selinux.get_context_of_process(libvirt_pid)
            logging.debug("The libvirtd process context is: %s",
                          libvirt_context)

        finally:
            libvirtd_session.exit()
        _check_errors()
    finally:
        logging.info('Recovering services status')
        # Restart the socket service after running the process in the foreground
        utils_libvirtd.Libvirtd("virtnetworkd.socket").restart()
        # If the service does not exist, the backup status and current
        # status will both be None and nothing will be done
        if service_mgr.status('iptables') != backup_iptables_status:
            if backup_iptables_status:
                service_mgr.start('iptables')
                process.run('iptables-restore < /tmp/iptables.save',
                            shell=True)
            else:
                service_mgr.stop('iptables')
        if service_mgr.status('firewalld') != backup_firewalld_status:
            if backup_firewalld_status:
                service_mgr.start('firewalld')
            else:
                service_mgr.stop('firewalld')

        logging.info('Removing backup iptables')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")