Example 1
def get_numa_parameter(params, cgstop):
    """
    Get the numa parameters
    :param params: the parameter dictionary
    :param cgstop: whether control groups were stopped prior to the get
    """
    vm_name = params.get("vms")
    options = params.get("options", None)
    result = virsh.numatune(vm_name, options=options)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                raise error.TestFail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            logging.info(result.stdout)
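
A minimal usage sketch for the helper above, assuming a working libvirt environment (virsh here is virttest.virsh); the domain name and option values are hypothetical, not from the source page.

# Hypothetical params; keys mirror the test's Cartesian config.
params = {
    "vms": "avocado-vt-vm1",   # domain name (assumption)
    "options": "--config",     # forwarded to virsh numatune
    "status_error": "no",      # the command is expected to succeed
}
get_numa_parameter(params, cgstop=False)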
Example 2
def set_numa_parameter(params, cgstop):
    """
    Set the numa parameters
    :param params: the parameter dictionary
    :param cgstop: whether control groups were stopped prior to the set
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Don't use libvirt_xml here because testing numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict'
    if mode == "strict" and start_vm == "yes":
        status_error = "no"

    # TODO: the '--config' option affects the next boot, and if the guest
    # is shut off, '--current' is equivalent to '--config'. If a user
    # passes a nodeset range that exceeds the host's NUMA nodes and uses
    # virsh numatune with '--config' or '--current' to set that invalid
    # nodeset on a shut-off guest, virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when starting the guest;
    # the current virsh.start() can't cover this check.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                raise error.TestFail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            if (cpus_parser(nodeset)[-1] + 1) > num_numa_nodes():
                raise error.TestNAError("Host does not support requested"
                                        " nodeset")
            else:
                raise error.TestFail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'mode' or/and 'nodeset' are"
                                     " inconsistent with numatune XML")
Example 3
def set_numa_parameter(params):
    """
    Set the numa parameters
    :param params: the parameter dictionary
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict'
    if mode == "strict" and start_vm == "yes":
        status_error = "no"

    # TODO: the '--config' option affects the next boot, and if the guest
    # is shut off, '--current' is equivalent to '--config'. If a user
    # passes a nodeset range that exceeds the host's NUMA nodes and uses
    # virsh numatune with '--config' or '--current' to set that invalid
    # nodeset on a shut-off guest, virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when starting the guest;
    # the current virsh.start() can't cover this check.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            raise error.TestFail("Unexpected return code %d" % status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'mode' or/and 'nodeset' are"
                                     " inconsistent with numatune XML")
Example 4
def get_numa_parameter(params):
    """
    Get the numa parameters
    :param params: the parameter dictionary
    """
    vm_name = params.get("vms")
    options = params.get("options", None)
    result = virsh.numatune(vm_name, options=options)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            raise error.TestFail("Unexpected return code %d" % status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            logging.info(result.stdout)
Example 5
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage on the used nodes should be greater than on the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    with open(config_path, 'a') as f:
        pass
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, we need to stop
        # the virtlogd service and start it as a daemon to avoid an
        # SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True, shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        # Allow more time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name, 'strict', str(node), options,
                           debug=True, ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node,
                vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # into sequential indexes so they can be used in mem_compare;
            # memory_status covers every online NUMA node, so node_list
            # may not match the node count
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [total_online_node_list.index(i)
                             for i in total_online_node_list if i != node]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket', ignore_status=True, shell=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
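
The run() above derives its numatune dict by stripping the "memory_" prefix from the Cartesian parameter names; only keys that are actually set end up in the dict. A standalone sketch of that mapping, with hypothetical values:

params = {"memory_mode": "strict", "memory_placement": "auto"}

mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
numa_memory = {}
for mem_param in mem_tuple:
    value = params.get(mem_param)
    if value:
        # 'memory_mode' -> 'mode', 'memory_placement' -> 'placement', ...
        numa_memory[mem_param.split('_')[1]] = value

assert numa_memory == {"mode": "strict", "placement": "auto"}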
Example 6
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage on the used nodes should be greater than on the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise error.TestNAError("At least 2 numa nodes are needed on host")

    # Prepare numatune memory parameter dict
    mem_tuple = ("memory_mode", "memory_placement", "memory_nodeset")
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split("_")[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, "a").close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger, logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        if numa_memory.get("nodeset"):
            used_node = utils_test.libvirt.cpus_parser(numa_memory["nodeset"])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("nodeset %s out of range" % numa_memory["nodeset"])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise error.TestFail("Test failed in positive case.\n error: %s" % e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get("placement") == "auto":
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name, "strict", str(node), options, debug=True, ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory["nodeset"] = str(node)
            del pos_numa_memory["placement"]
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise error.TestFail("numa memory config %s not expected after" " live update" % numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # into sequential indexes so they can be used in mem_compare
            left_node_new = [node_list.index(i) for i in node_list if i != node]
            used_node = [node_list.index(node)]

            mem_compare(used_node, left_node_new)
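
mem_compare above asserts that the node the domain was pinned to holds more of the guest's memory than the remaining nodes combined. A self-contained sketch with made-up per-node figures (a plain AssertionError stands in for the framework's TestFail):

# Hypothetical per-node memory usage (KB) on a two-node host; the domain
# was just pinned to node 0, so node 0 should dominate.
memory_status = ["1048576", "4096"]

def mem_compare(used_node, left_node):
    """Fail if the unpinned nodes hold more memory than the pinned ones."""
    used_mem_total = sum(int(memory_status[i]) for i in used_node)
    left_mem_total = sum(int(memory_status[i]) for i in left_node)
    if left_mem_total > used_mem_total:
        raise AssertionError("nodes memory usage not expected.")

mem_compare([0], [1])  # passes: node 0 holds far more memory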
Example 7
def set_numa_parameter(test, params, cgstop):
    """
    Set the numa parameters
    :param params: the parameter dictionary
    :param cgstop: whether control groups were stopped prior to the set
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host online nodes with memory %s", node_list)

    # Get original numatune memory mode
    ori_mode = ''
    ori_numatune = {}
    if libvirt_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile.find(
            'numatune'):
        ori_numatune = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
        ori_mode = ori_numatune['mode'] if 'mode' in ori_numatune else ''

    # Don't use libvirt_xml here because testing numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict',
    # which should be the same as the original mode
    if ori_mode == mode and (ori_numatune.get('nodeset') == nodeset
                             or not nodeset):
        status_error = "no"
    if mode == "strict" and start_vm == "yes":
        status_error = "no"
    if ori_mode and ori_mode != mode and start_vm == "yes":
        status_error = "yes"

    # TODO: the '--config' option affects the next boot, and if the guest
    # is shut off, '--current' is equivalent to '--config'. If a user
    # passes a nodeset range that exceeds the host's NUMA nodes and uses
    # virsh numatune with '--config' or '--current' to set that invalid
    # nodeset on a shut-off guest, virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when starting the guest;
    # the current virsh.start() can't cover this check.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                test.fail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            used_node = cpus_parser(nodeset)
            if not set(used_node).issubset(node_list):
                test.cancel("Host does not support requested"
                            " nodeset %s" % used_node)
            else:
                test.fail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'mode' or/and 'nodeset' are"
                          " inconsistent with numatune XML")
Example 9
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage on the used nodes should be greater than on the left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    with open(config_path, 'a') as f:
        pass
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, we need to stop
        # the virtlogd service and start it as a daemon to avoid an
        # SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop",
                        ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        # Allow more time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = cpu.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # into sequential indexes so they can be used in mem_compare;
            # memory_status covers every online NUMA node, so node_list
            # may not match the node count
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i) for i in total_online_node_list
                if i != node
            ]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True,
                        shell=True)
        except path.CmdNotFoundError:
            pass
        if vm.is_alive():
            vm.destroy(gracefully=False)
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        backup_xml.sync()