Example #1
    def edit_vcpu(source):
        """
        Modify the VM's vcpu count via the virsh edit command.

        :param source: virsh edit's option (domain name, id or uuid).
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        if status_error != "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                sockets = str(int(topology['sockets']) + 1)
                cores = topology['cores']
                threads = topology['threads']
                vmcpu_xml = vm_xml.VMCPUXML()
                vmcpu_xml['topology'] = {
                    'sockets': sockets,
                    'cores': cores,
                    'threads': threads
                }
                vmxml_backup['cpu'] = vmcpu_xml
                vmxml_backup.sync()
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception:
                # No topology defined; fall back to a simple increment.
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
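
Note: the dic_mode entries above are vi substitute commands (:%s/pattern/replacement/) that exec_virsh_edit types into the editor spawned by virsh edit. A minimal standalone sketch of the same rewrite, using Python's re.sub on an illustrative XML fragment (the fragment and the counts are hypothetical):

    import re

    # Fragment as virsh edit would present it (illustrative).
    xml = "<vcpu placement='static'>4</vcpu>"
    expected_vcpu = "5"

    # Same effect as the vi command :%s/[0-9]*<\/vcpu>/5<\/vcpu>/ --
    # replace the digits right before </vcpu> with the new count.
    edited = re.sub(r"[0-9]*</vcpu>", expected_vcpu + "</vcpu>", xml)
    print(edited)  # <vcpu placement='static'>5</vcpu>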
Example #2
    def edit_vcpu(source):
        """
        Modify the VM's vcpu count via the virsh edit command.

        :param source: virsh edit's option (domain name, id or uuid).
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        if status_error != "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                sockets = str(int(topology['sockets']) + 1)
                cores = topology['cores']
                threads = topology['threads']
                vmcpu_xml = vm_xml.VMCPUXML()
                vmcpu_xml['topology'] = {'sockets': sockets, 'cores': cores,
                                         'threads': threads}
                vmxml_backup['cpu'] = vmcpu_xml
                vmxml_backup.sync()
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception:
                # No topology defined; fall back to a simple increment.
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example #3
def get_vcpucount_details(vm, options):
    """
    Get the vcpucount output.

    :param vm: VM object
    :param options: options to pass to vcpucount

    :return: tuple of result and dict of vcpucount output values
    """
    vcpucount_details = {
        'max_config': None,
        'max_live': None,
        'cur_config': None,
        'cur_live': None,
        'guest_live': None
    }

    result = virsh.vcpucount(vm.name, options, ignore_status=True, debug=True)
    if result.exit_status:
        logging.debug("vcpu count command failed")
        return (result, vcpucount_details)

    if options:
        stdout = result.stdout_text.strip()
        if 'guest' in options:
            vcpucount_details['guest_live'] = int(stdout)
        elif 'config' in options:
            if 'maximum' in options:
                vcpucount_details['max_config'] = int(stdout)
            else:
                vcpucount_details['cur_config'] = int(stdout)
        elif 'live' in options:
            if 'maximum' in options:
                vcpucount_details['max_live'] = int(stdout)
            else:
                vcpucount_details['cur_live'] = int(stdout)
    else:
        output = result.stdout_text.strip().split('\n')
        for item in output:
            if ('maximum' in item) and ('config' in item):
                vcpucount_details['max_config'] = int(item.split()[2].strip())
            elif ('maximum' in item) and ('live' in item):
                vcpucount_details['max_live'] = int(item.split()[2].strip())
            elif ('current' in item) and ('config' in item):
                vcpucount_details['cur_config'] = int(item.split()[2].strip())
            elif ('current' in item) and ('live' in item):
                vcpucount_details['cur_live'] = int(item.split()[2].strip())
    return (result, vcpucount_details)
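
With no options, virsh vcpucount prints a small three-column table, and the else branch above splits each row on whitespace and keys on the first two words. A self-contained sketch of that parsing on canned output (the rows mirror libvirt's table format; the numbers are illustrative):

    # Canned `virsh vcpucount <dom>` output for a running domain (illustrative).
    sample = ("maximum      config         8\n"
              "maximum      live           8\n"
              "current      config         4\n"
              "current      live           4")

    key_map = {('maximum', 'config'): 'max_config',
               ('maximum', 'live'): 'max_live',
               ('current', 'config'): 'cur_config',
               ('current', 'live'): 'cur_live'}
    details = dict.fromkeys(key_map.values())
    for row in sample.splitlines():
        scope, when, count = row.split()
        details[key_map[(scope, when)]] = int(count)
    print(details)  # {'max_config': 8, 'max_live': 8, 'cur_config': 4, 'cur_live': 4}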
Example #4
def check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option=""):
    """
    Check domain vcpu, including vcpucount, vcpuinfo, vcpupin, vcpu number and
    cputune in domain xml, vcpu number inside the domain, and cpu-stats.

    :param vm: VM object
    :param expect_vcpu_num: A list of expected vcpu numbers:
        expect_vcpu_num[0] = maximum config vcpu number
        expect_vcpu_num[1] = maximum live vcpu number
        expect_vcpu_num[2] = current config vcpu number
        expect_vcpu_num[3] = current live vcpu number
        expect_vcpu_num[4] = vcpu number inside the domain
    :param expect_vcpupin: A dict of expected vcpu affinities
    :param setvcpu_option: Option for virsh setvcpus command
    """
    logging.debug("Expect vcpu number: %s", expect_vcpu_num)
    # Check virsh vcpucount output
    vcpucount_option = ""
    if setvcpu_option == "--guest" and vm.state() == "running":
        vcpucount_option = "--guest"
    result = virsh.vcpucount(vm.name,
                             vcpucount_option,
                             ignore_status=True,
                             debug=True)
    libvirt.check_exit_status(result)
    output = result.stdout.strip()
    if vcpucount_option == "--guest":
        if output != expect_vcpu_num[-1]:
            raise error.TestFail("Virsh vcpucount output is unexpected")
    else:
        elems = len(output.splitlines())
        for i in range(elems):
            # If the domain is not alive, vcpucount output is:
            # # virsh vcpucount test
            #  maximum      config         4
            #  current      config         1
            # which correspond to expect_vcpu_num[0] and expect_vcpu_num[2].
            if vm.is_alive():
                j = i
            else:
                j = 2 * i
            try:
                if output.splitlines()[i].split()[-1] != expect_vcpu_num[j]:
                    raise error.TestFail(
                        "Virsh vcpucount output is unexpected")
            except IndexError as detail:
                raise error.TestFail(detail)
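
The j = 2 * i mapping above works because a shut-off domain prints only the two config rows, which line up with the even indices of expect_vcpu_num. A tiny sketch of that correspondence (values illustrative):

    expect_vcpu_num = ['8', '8', '4', '4', '4']  # illustrative expectations
    offline_rows = ['maximum config', 'current config']
    for i, row in enumerate(offline_rows):
        j = 2 * i  # row 0 -> index 0 (max config), row 1 -> index 2 (cur config)
        print("%s expected to be %s" % (row, expect_vcpu_num[j]))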
Example #5
def get_vcpucount_details(vm, options):
    """
    Get the vcpucount output.

    :param vm: VM object
    :param options: options to pass to vcpucount

    :return: tuple of result and dict of vcpucount output values
    """
    vcpucount_details = {'max_config': None, 'max_live': None,
                         'cur_config': None, 'cur_live': None,
                         'guest_live': None}

    result = virsh.vcpucount(vm.name, options, ignore_status=True,
                             debug=True)
    if results_stderr_52lts(result):
        logging.debug("vcpu count command failed")
        return (result, vcpucount_details)

    if options:
        stdout = results_stdout_52lts(result).strip()
        if 'guest' in options:
            vcpucount_details['guest_live'] = int(stdout)
        elif 'config' in options:
            if 'maximum' in options:
                vcpucount_details['max_config'] = int(stdout)
            else:
                vcpucount_details['cur_config'] = int(stdout)
        elif 'live' in options:
            if 'maximum' in options:
                vcpucount_details['max_live'] = int(stdout)
            else:
                vcpucount_details['cur_live'] = int(stdout)
    else:
        output = results_stdout_52lts(result).strip().split('\n')
        for item in output:
            if ('maximum' in item) and ('config' in item):
                vcpucount_details['max_config'] = int(item.split()[2].strip())
            elif ('maximum' in item) and ('live' in item):
                vcpucount_details['max_live'] = int(item.split()[2].strip())
            elif ('current' in item) and ('config' in item):
                vcpucount_details['cur_config'] = int(item.split()[2].strip())
            elif ('current' in item) and ('live' in item):
                vcpucount_details['cur_live'] = int(item.split()[2].strip())
    return (result, vcpucount_details)
Example #6
def check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option=""):
    """
    Check domain vcpu, including vcpucount, vcpuinfo, vcpupin, vcpu number and
    cputune in domain xml, vcpu number inside the domain, and cpu-stats.

    :param vm: VM object
    :param expect_vcpu_num: A list of expected vcpu numbers:
        expect_vcpu_num[0] = maximum config vcpu number
        expect_vcpu_num[1] = maximum live vcpu number
        expect_vcpu_num[2] = current config vcpu number
        expect_vcpu_num[3] = current live vcpu number
        expect_vcpu_num[4] = vcpu number inside the domain
    :param expect_vcpupin: A dict of expected vcpu affinities
    :param setvcpu_option: Option for virsh setvcpus command
    """
    logging.debug("Expect vcpu number: %s", expect_vcpu_num)
    # Check virsh vcpucount output
    vcpucount_option = ""
    if setvcpu_option == "--guest" and vm.state() == "running":
        vcpucount_option = "--guest"
    result = virsh.vcpucount(vm.name, vcpucount_option, ignore_status=True,
                             debug=True)
    libvirt.check_exit_status(result)
    output = result.stdout.strip()
    if vcpucount_option == "--guest":
        if output != expect_vcpu_num[-1]:
            raise error.TestFail("Virsh vcpucount output is unexpected")
    else:
        elems = len(output.splitlines())
        for i in range(elems):
            # If the domain is not alive, vcpucount output is:
            # # virsh vcpucount test
            #  maximum      config         4
            #  current      config         1
            # which correspond to expect_vcpu_num[0] and expect_vcpu_num[2].
            if vm.is_alive():
                j = i
            else:
                j = 2 * i
            try:
                if output.splitlines()[i].split()[-1] != expect_vcpu_num[j]:
                    raise error.TestFail("Virsh vcpucount output is unexpected")
            except IndexError as detail:
                raise error.TestFail(detail)
Example #7
    def edit_vcpu(source):
        """
        Modify the VM's vcpu count via the virsh edit command.

        :param source: virsh edit's option (domain name, id or uuid).
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example #8
    def edit_vcpu(source):
        """
        Modify the VM's vcpu count via the virsh edit command.

        :param source: virsh edit's option (domain name, id or uuid).
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = exec_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = exec_edit(vm_name, [dic_mode["recover"]])
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example #9
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Touch the log file so it exists before libvirtd opens it
            with open(config_path, 'a'):
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters":
                "\"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))
        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain
                vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" %
                       (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if (vcpu['enabled'] == 'yes'
                            and vcpu['hotpluggable'] == 'yes'):
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                                  output)
            expect_num = 2 if start_vm_after_config else 1
            if len(max_list) != expect_num:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            expect_num = vcpus_crt if start_vm_after_config else int(
                config_vcpus)
            if len(vcpu_lines) != expect_num:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                    vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

            # Restart libvirtd
            libvirtd.restart()
            if config_vcpus and not start_vm_after_config:
                check_vm_exist(test, vm_name, 'shut off')
            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            if start_vm_after_config:
                en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
                for vcpu_sn in range(len(en_vcpu_list)):
                    vcpu_id = en_vcpu_list[vcpu_sn].split(
                        "=")[1].split()[0].strip('\'')
                    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                    cg_path = cg_obj.get_cgroup_path("cpuset")
                    if cg_obj.is_cgroup_v2_enabled():
                        vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                    else:
                        vcpu_path = os.path.join(cg_path,
                                                 "../vcpu%s" % vcpu_id)
                    if not os.path.exists(vcpu_path):
                        test.fail(
                            "Failed to find the enabled vcpu{} in {}.".format(
                                vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
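
Each dict in vcpu_list above becomes one <vcpu> element under <vcpus> in the domain XML. A hedged sketch of the fragment this produces, rendered with plain string formatting for hypothetical parameter values (the id/enabled/hotpluggable/order attributes follow libvirt's vcpu hotplug schema):

    # Hypothetical test parameters.
    vcpus_max = 4
    en_list = "0,1".split(",")           # vcpus_enabled
    hotplug_list = "1,2,3".split(",")    # vcpus_hotpluggable
    order_dict = {'0': '1', '1': '2'}    # vcpus_order

    print("<vcpus>")
    for vcpu_id in range(vcpus_max):
        sid = str(vcpu_id)
        attrs = "id='%s' enabled='%s' hotpluggable='%s'" % (
            sid,
            'yes' if sid in en_list else 'no',
            'yes' if sid in hotplug_list else 'no')
        if sid in en_list and sid in order_dict:
            attrs += " order='%s'" % order_dict[sid]
        print("  <vcpu %s/>" % attrs)
    print("</vcpus>")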
Example #10
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain state "shut off" or "running"; it checks the
    vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, at most 2 options are combined; upstream libvirt
    now supports more option combinations (e.g. 3 options together or a
    single --maximum option), but for backward compatibility only the
    following options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    maxvcpu = int(params.get("vcpucount_maxvcpu", "4"))
    curvcpu = int(params.get("vcpucount_current", "1"))
    sockets = int(params.get("sockets", "1"))
    cores = int(params.get("cores", "4"))
    threads = int(params.get("threads", "1"))
    livevcpu = curvcpu + threads
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # Early death
    # 1.1 More than two options not supported
    if len(options.split()) > 2:
        test.cancel("Options exceeds 2 is not supported")

    # 1.2 Check for all options
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("vcpucount", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)
    # 1.3 Check for vcpu values
    if (sockets and cores and threads):
        if int(maxvcpu) != int(sockets) * int(cores) * int(threads):
            test.cancel("Invalid topology definition, VM will not start")

    try:
        # Prepare domain
        reset_domain(vm, pre_vm_state, maxvcpu, curvcpu, sockets, cores,
                     threads, ("--guest" in options))

        # Perform guest vcpu hotplug
        for idx in range(len(set_option)):
            # Remove topology for maximum config
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            if idx == 1:
                del_topology(vm, pre_vm_state)
            # Hotplug domain vcpu
            result = virsh.setvcpus(vm_name,
                                    livevcpu,
                                    set_option[idx],
                                    ignore_status=True,
                                    debug=True)
            setvcpus_status = result.exit_status

            # Call virsh vcpucount with option
            result = virsh.vcpucount(vm_name,
                                     options,
                                     ignore_status=True,
                                     debug=True)
            output = result.stdout.strip()
            vcpucount_status = result.exit_status

            if "--guest" in options:
                if result.stderr.count("doesn't support option") or \
                   result.stderr.count("command guest-get-vcpus has not been found"):
                    reset_env(vm_name, xml_file)
                    test.fail("Option %s is not supported" % options)

            # Reset domain
            reset_domain(vm, pre_vm_state, maxvcpu, curvcpu, sockets, cores,
                         threads, ("--guest" in options))

            # Check result
            if status_error == "yes":
                if vcpucount_status == 0:
                    reset_env(vm_name, xml_file)
                    test.fail("Run successfully with wrong command!")
                else:
                    logging.info("Run failed as expected")
            else:
                if vcpucount_status != 0:
                    reset_env(vm_name, xml_file)
                    test.fail("Run command failed with options %s" % options)
                elif setvcpus_status == 0:
                    if pre_vm_state == "shut off":
                        if idx == 0:
                            expect_out = [maxvcpu, livevcpu]
                            chk_output_shutoff(output, expect_out, options)
                        elif idx == 1:
                            expect_out = [livevcpu, curvcpu]
                            chk_output_shutoff(output, expect_out, options)
                        else:
                            reset_env(vm_name, xml_file)
                            test.fail("setvcpus should failed")
                    else:
                        if idx == 0:
                            expect_out = [
                                maxvcpu, maxvcpu, livevcpu, curvcpu, curvcpu
                            ]
                            chk_output_running(output, expect_out, options)
                        elif idx == 1:
                            expect_out = [
                                livevcpu, maxvcpu, curvcpu, curvcpu, curvcpu
                            ]
                            chk_output_running(output, expect_out, options)
                        elif idx == 2:
                            expect_out = [
                                maxvcpu, maxvcpu, curvcpu, livevcpu, livevcpu
                            ]
                            chk_output_running(output, expect_out, options)
                        else:
                            expect_out = [
                                maxvcpu, maxvcpu, curvcpu, curvcpu, livevcpu
                            ]
                            chk_output_running(output, expect_out, options)
                else:
                    if pre_vm_state == "shut off":
                        expect_out = [maxvcpu, curvcpu]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        expect_out = [
                            maxvcpu, maxvcpu, curvcpu, curvcpu, curvcpu
                        ]
                        chk_output_running(output, expect_out, options)
    finally:
        # Recover env
        reset_env(vm_name, xml_file)
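
For reference, the output shapes that chk_output_shutoff and chk_output_running (defined elsewhere in this test module) have to parse differ by domain state; a hedged sketch with illustrative counts:

    # virsh vcpucount <dom>, domain shut off -- only the config rows:
    #  maximum      config         4
    #  current      config         1
    #
    # virsh vcpucount <dom>, domain running -- config and live rows:
    #  maximum      config         4
    #  maximum      live           4
    #  current      config         1
    #  current      live           1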
Example #11
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns list of vcpu's affinity from vcpupin output

        :param vm_name: VM Name
        :param vcpu: VM cpu pid
        :return : list of affinity to vcpus
        """
        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = item.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of the
        given vcpu and raises an error if they do not match.

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        :param vcpu_pid: vcpu thread pid
        """

        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s", actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            test.fail("Cpu pinning details are not "
                      "updated properly in /proc/"
                      "%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is positive case.
                test.fail(cmdResult)
            else:
                # Command fail and it is negative case.
                return
        else:
            if status_error:
                # Command success and it is negative case.
                test.fail(cmdResult)
            else:
                # Command success and it is positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't"
                    " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the alive cpus list
    cpus_list = list(map(str, cpuutils.cpu_online_list()))
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, let's check whether
        # the initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid, vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domains")
            if not vm2:
                test.cancel("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (not cpu_list == "x"):
            test.cancel("We need more cpus on host in this "
                        "case for the cpu_list=%s. But "
                        "current number of cpu on host is %s."
                        % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized."
                                % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
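
cpus_string_to_affinity_list, used throughout the checks above, expands a cpuset string such as "0-3,^2" into the per-cpu y/- list that vcpuinfo prints. A hypothetical standalone equivalent (not the library's code), for illustration only:

    def cpus_to_affinity(cpus, total):
        """Expand a cpuset string like '0-3,^2' into a y/- list of length total."""
        mask = [False] * total
        for part in cpus.split(','):
            negate = part.startswith('^')   # '^' excludes a cpu from the set
            part = part.lstrip('^')
            if '-' in part:
                lo, hi = map(int, part.split('-'))
                idxs = range(lo, hi + 1)
            else:
                idxs = [int(part)]
            for i in idxs:
                mask[i] = not negate
        return ['y' if m else '-' for m in mask]

    print(cpus_to_affinity('0-3,^2', 8))  # ['y', 'y', '-', 'y', '-', '-', '-', '-']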
Example #12
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(domname, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(domname).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(domname, vcpu, cpu_list, pid):
        """
        This function checks the actual and the expected affinity of the
        given vcpu and raises an error if they do not match.

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        """

        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            host_cpu_count)
        actual_output = affinity_from_vcpuinfo(domname, vcpu)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s'not "
                                 "succeeded, cpu pinning details not "
                                 "updated properly in virsh vcpuinfo "
                                 "command output" % (vm_name, vcpu, cpu_list))

        if pid is None:
            return
        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            host_cpu_count)

        if expected_output == actual_output_proc:
            logging.info("successfully pinned cpu: %s --> vcpu: %s"
                         " in respective proc entry", cpu_list, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s'not "
                                 "succeeded cpu pinning details not "
                                 "updated properly in /proc/%s/task/%s/status"
                                 % (vm_name, vcpu, cpu_list, pid, vcpu_pid))

    def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
        """
        Run the vcpupin command and then check the result.
        """
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                if options == "--config":
                    virsh.destroy(vm_name)
                    pid = None
                # Check the result of vcpupin command.
                check_vcpupin(vm_name, vcpu, cpu_list, pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    pid = vm.get_pid()
    # Get the variables for vcpupin command.
    args = params.get("vcpupin_args", "dom_name")
    if args == "dom_name":
        args = vm_name
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Run cases when guest is shutoff.
    if vm.is_dead() and (params.get("start_vm") == "no"):
        run_and_check_vcpupin(args, 0, 0, "", 0)
        return
    # Get the host cpu count
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case "
                                "for the cpu_list=%s. But current number of "
                                "cpu on host is %s."
                                % (cpu_list, host_cpu_count))

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       "--live --active").stdout.strip()

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        if cpu_list == "x":
            for cpu in range(int(host_cpu_count)):
                run_and_check_vcpupin(args, vcpu, str(cpu), options, pid)
        else:
            cpu_max = int(host_cpu_count) - 1
            if cpu_list == "x-y":
                cpus = "0-%s" % cpu_max
            elif cpu_list == "x,y":
                cpus = "0,%s" % cpu_max
            elif cpu_list == "x-y,^z":
                cpus = "0-%s,^%s" % (cpu_max, cpu_max)
            elif cpu_list == "r":
                cpus = "r"
            elif cpu_list == "-1":
                cpus = "-1"
            elif cpu_list == "out_of_max":
                cpus = str(cpu_max + 1)
            else:
                raise error.TestNAError("Cpu_list=%s is not recognized."
                                        % cpu_list)
            run_and_check_vcpupin(args, vcpu, cpus, options, pid)
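
The cpu_list parameter here is a placeholder pattern that is expanded against the host topology before the call; a quick sketch of the expansions the branch above produces on an 8-cpu host (cpu_max = 7):

    cpu_max = 7  # illustrative 8-cpu host
    expansions = {
        "x-y": "0-7",        # a plain range
        "x,y": "0,7",        # a comma-separated list
        "x-y,^z": "0-7,^7",  # a range excluding one cpu
        "r": "r",            # invalid token (negative case)
        "-1": "-1",          # invalid cpu number (negative case)
        "out_of_max": "8",   # one past the last host cpu
    }
    for pattern, cpus in expansions.items():
        print("%-10s -> %s" % (pattern, cpus))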
Example #13
def run(test, params, env):
    """
    Test command: virsh edit.

    The command can edit XML configuration for a domain
    1. Prepare the test environment: destroy or suspend a VM.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh edit operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config --maximum")
    if vcpucount_result.exit_status:
        # Fail back to libvirt_xml way to test vcpucount.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        original_vcpu = str(vmxml.vcpu)
    else:
        original_vcpu = vcpucount_result.stdout.strip()

    expected_vcpu = str(int(original_vcpu) + 1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        :param source: virsh edit's option.
        :param edit_cmd: an edit command line.
        :return: True if the edit succeeded, False if it failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh -c %s edit %s" % (vm.connect_uri, source))
            session.sendline(edit_cmd)
            session.send('\x1b')  # ESC leaves vi insert mode
            session.send('ZZ')    # ZZ saves the file and quits vi
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vcpus = str(vmxml.vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except Exception:
        status = False

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example #14
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Touch the log file so it exists before libvirtd opens it
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception:
            # The <cpu> element or its <topology> may be absent; nothing to do.
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
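For reference, the <vcpus> element that the test above assembles through vm_xml.VMVCPUSXML looks roughly as follows. A minimal standard-library sketch with illustrative values; the real ids, enabled/hotpluggable flags and order come from the vcpus_enabled/vcpus_hotpluggable/vcpus_order params:

import xml.etree.ElementTree as ET

vcpus = ET.Element("vcpus")
for vcpu_id, enabled, hotplug, order in [("0", "yes", "no", "1"),
                                         ("1", "yes", "yes", "2"),
                                         ("2", "no", "yes", None)]:
    attrs = {"id": vcpu_id, "enabled": enabled, "hotpluggable": hotplug}
    if order:  # in the test's input only enabled vcpus carry an order
        attrs["order"] = order
    ET.SubElement(vcpus, "vcpu", attrs)

print(ET.tostring(vcpus).decode())
# e.g. <vcpus><vcpu id="0" enabled="yes" hotpluggable="no" order="1" />...</vcpus>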
Example No. 15
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for the domain states "shut off" and "running"; it checks
    the vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount, at most 2 options are combined; upstream libvirt now
    supports more option combinations (e.g. 3 options together or a single
    --maximum option), but for backward compatibility only the following
    options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = "yes" == params.get("status_error", "no")
    maxvcpu = int(params.get("vcpucount_maxvcpu", "4"))
    curvcpu = int(params.get("vcpucount_current", "1"))
    sockets = int(params.get("sockets", "1"))
    cores = int(params.get("cores", "4"))
    threads = int(params.get("threads", "1"))
    expect_msg = params.get("vcpucount_err_msg")
    livevcpu = curvcpu + threads
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # Early death
    # 1.1 More than two options not supported
    if len(options.split()) > 2:
        test.cancel("Options exceeds 2 is not supported")

    # 1.2 Check for all options
    option_list = options.split(" ")
    if not status_error:
        for item in option_list:
            if virsh.has_command_help_match("vcpucount", item) is None:
                test.cancel("The current libvirt version doesn't support "
                            "'%s' option" % item)
    # 1.3 Check for vcpu values
    if (sockets and cores and threads):
        if int(maxvcpu) != int(sockets) * int(cores) * int(threads):
            test.cancel("Invalid topology definition, VM will not start")

    try:
        # Prepare domain
        reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                     sockets, cores, threads, ("--guest" in options))

        # Perform guest vcpu hotplug
        for idx in range(len(set_option)):
            # Remove topology for maximum config
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            if idx == 1:
                del_topology(vm, pre_vm_state)
            # Hotplug domain vcpu
            result = virsh.setvcpus(vm_name, livevcpu, set_option[idx],
                                    ignore_status=True, debug=True)
            setvcpus_status = result.exit_status

            # Call virsh vcpucount with option
            result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                     debug=True)
            output = result.stdout.strip()
            vcpucount_status = result.exit_status

            if "--guest" in options:
                if result.stderr.count("doesn't support option") or \
                   result.stderr.count("command guest-get-vcpus has not been found"):
                    test.fail("Option %s is not supported" % options)

            # Reset domain
            reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                         sockets, cores, threads, ("--guest" in options))

            # Check result
            if status_error:
                if vcpucount_status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    logging.info("Run failed as expected")
                    if expect_msg:
                        libvirt.check_result(result, expect_msg.split(';'))
            else:
                if vcpucount_status != 0:
                    test.fail("Run command failed with options %s" %
                              options)
                elif setvcpus_status == 0:
                    if pre_vm_state == "shut off":
                        if idx == 0:
                            expect_out = [maxvcpu, livevcpu]
                            chk_output_shutoff(output, expect_out, options, test)
                        elif idx == 1:
                            expect_out = [livevcpu, curvcpu]
                            chk_output_shutoff(output, expect_out, options, test)
                        else:
                            test.fail("setvcpus should failed")
                    else:
                        if idx == 0:
                            expect_out = [maxvcpu, maxvcpu, livevcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options, test)
                        elif idx == 1:
                            expect_out = [livevcpu, maxvcpu, curvcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options, test)
                        elif idx == 2:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          livevcpu, livevcpu]
                            chk_output_running(output, expect_out, options, test)
                        else:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          curvcpu, livevcpu]
                            chk_output_running(output, expect_out, options, test)
                else:
                    if pre_vm_state == "shut off":
                        expect_out = [maxvcpu, curvcpu]
                        chk_output_shutoff(output, expect_out, options, test)
                    else:
                        expect_out = [
                            maxvcpu, maxvcpu, curvcpu, curvcpu, curvcpu]
                        chk_output_running(output, expect_out, options, test)
    finally:
        # Recover env
        reset_env(vm_name, xml_file)
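The del_topology() helper called above (the workaround for the Bugzilla entry linked in the loop) is not shown in this listing. A plausible reconstruction, modeled on the "Remove influence from topology setting" block of the previous example; treat it as a sketch, not the suite's actual implementation:

from virttest.libvirt_xml import vm_xml

def del_topology(vm, vm_state):
    """Drop <topology> from the <cpu> element so that
    "setvcpus --config --maximum" is not rejected."""
    if vm_state != "shut off":
        vm.destroy()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    try:
        cpuxml = vmxml.cpu
        del cpuxml.topology
        vmxml.cpu = cpuxml
        vmxml.sync()
    except Exception:
        pass  # no <cpu> element or no <topology>; nothing to remove
    if vm_state != "shut off":
        vm.start()
        vm.wait_for_login()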
Example No. 16
def run_virsh_edit(test, params, env):
    """
    Test command: virsh edit.

    The command can edit XML configuration for a domain
    1.Prepare test environment,destroy or suspend a VM.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh edit operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config")
    if vcpucount_result.exit_status:
        raise error.TestError("Failed to get vcpucount. Detail:\n%s"
                              % vcpucount_result)
    original_vcpu = vcpucount_result.stdout.strip()
    expected_vcpu = str(int(original_vcpu)+1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        @param: source : virsh edit's option.
        @param: edit_cmd : an edit command line.
        @return: True if the edit succeeded, False if it failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh edit %s" % source)
            session.sendline(edit_cmd)
            session.send('\x1b')
            session.send('ZZ')
            # use sleep(1) to make sure the modification has completed.
            time.sleep(1)
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        @param: source : virsh edit's option.
        @param: guest_name : vm's name.
        @return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {"edit": ":%s /[0-9]*<\/vcpu>/"+expected_vcpu+"<\/vcpu>",
                    "recover": ":%s /[0-9]*<\/vcpu>/"+original_vcpu+"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except:
        status = False

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example No. 17
def run_virsh_vcpucount(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for the domain states "shut off" and "running"; it checks
    the vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount, at most 2 options are combined; upstream libvirt now
    supports more option combinations (e.g. 3 options together or a single
    --maximum option), but for backward compatibility only the following
    options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # maximum options should be 2
    if len(options.split()) > 2:
        raise error.TestNAError("Options exceeds 2 is not supported")

    # Prepare domain
    reset_domain(vm, pre_vm_state)

    # Perform guest vcpu hotplug
    for i in range(len(set_option)):
        # Hotplug domain vcpu
        result = virsh.setvcpus(vm_name, 2, set_option[i], ignore_status=True,
                                debug=True)
        setvcpus_status = result.exit_status

        # Call virsh vcpucount with option
        result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                 debug=True)
        output = result.stdout.strip()
        vcpucount_status = result.exit_status

        if "--guest" in options:
            if result.stderr.count("doesn't support option"):
                raise error.TestNAError("Option %s is not supported" % options)

        # Reset domain
        reset_domain(vm, pre_vm_state)

        # Check result
        if status_error == "yes":
            if vcpucount_status == 0:
                raise error.TestFail("Run successfully with wrong command!")
            else:
                logging.info("Run failed as expected")
        else:
            if vcpucount_status != 0:
                raise error.TestFail("Run command failed with options %s" %
                                     options)
            elif setvcpus_status == 0:
                if pre_vm_state == "shut off":
                    if i == 0:
                        expect_out = [4, 2]
                        chk_output_shutoff(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 1]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        raise error.TestFail("setvcpus should failed")
                else:
                    if i == 0:
                        expect_out = [4, 4, 2, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 4, 1, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 2:
                        expect_out = [4, 4, 1, 2, 2]
                        chk_output_running(output, expect_out, options)
                    else:
                        expect_out = [4, 4, 1, 1, 2]
                        chk_output_running(output, expect_out, options)
            else:
                if pre_vm_state == "shut off":
                    expect_out = [4, 1]
                    chk_output_shutoff(output, expect_out, options)
                else:
                    expect_out = [4, 4, 1, 1, 1]
                    chk_output_running(output, expect_out, options)

    # Recover env
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
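The positional expect_out lists in this and the surrounding examples line up with the rows of the "virsh vcpucount" table. chk_output_running itself is not shown here, but from which entry each setvcpus option changes, the mapping appears to be the following (a reading, not the suite's code):

# Inferred layout of expect_out for a running domain (each comment names
# the setvcpus option that moves that entry in the cases above):
EXPECT_OUT_RUNNING = [
    "maximum config",   # setvcpus --config --maximum
    "maximum live",
    "current config",   # setvcpus --config
    "current live",     # setvcpus --live
    "current guest",    # setvcpus --guest
]
# A shut-off domain only exposes the config rows:
EXPECT_OUT_SHUTOFF = ["maximum config", "current config"]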
Example No. 18
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for the domain states "shut off" and "running"; it checks
    the vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount, at most 2 options are combined; upstream libvirt now
    supports more option combinations (e.g. 3 options together or a single
    --maximum option), but for backward compatibility only the following
    options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # maximum options should be 2
    if len(options.split()) > 2:
        raise error.TestNAError("Options exceeds 2 is not supported")

    try:
        # Prepare domain
        reset_domain(vm, pre_vm_state, ("--guest" in options))

        # Perform guest vcpu hotplug
        for i in range(len(set_option)):
            # Hotplug domain vcpu
            result = virsh.setvcpus(vm_name,
                                    2,
                                    set_option[i],
                                    ignore_status=True,
                                    debug=True)
            setvcpus_status = result.exit_status

            # Call virsh vcpucount with option
            result = virsh.vcpucount(vm_name,
                                     options,
                                     ignore_status=True,
                                     debug=True)
            output = result.stdout.strip()
            vcpucount_status = result.exit_status

            if "--guest" in options:
                if result.stderr.count("doesn't support option") or \
                   result.stderr.count("command guest-get-vcpus has not been found"):
                    reset_env(vm_name, xml_file)
                    raise error.TestNAError("Option %s is not supported" %
                                            options)

            # Reset domain
            reset_domain(vm, pre_vm_state, ("--guest" in options))

            # Check result
            if status_error == "yes":
                if vcpucount_status == 0:
                    reset_env(vm_name, xml_file)
                    raise error.TestFail(
                        "Run successfully with wrong command!")
                else:
                    logging.info("Run failed as expected")
            else:
                if vcpucount_status != 0:
                    reset_env(vm_name, xml_file)
                    raise error.TestFail("Run command failed with options %s" %
                                         options)
                elif setvcpus_status == 0:
                    if pre_vm_state == "shut off":
                        if i == 0:
                            expect_out = [4, 2]
                            chk_output_shutoff(output, expect_out, options)
                        elif i == 1:
                            expect_out = [2, 1]
                            chk_output_shutoff(output, expect_out, options)
                        else:
                            reset_env(vm_name, xml_file)
                            raise error.TestFail("setvcpus should failed")
                    else:
                        if i == 0:
                            expect_out = [4, 4, 2, 1, 1]
                            chk_output_running(output, expect_out, options)
                        elif i == 1:
                            expect_out = [2, 4, 1, 1, 1]
                            chk_output_running(output, expect_out, options)
                        elif i == 2:
                            expect_out = [4, 4, 1, 2, 2]
                            chk_output_running(output, expect_out, options)
                        else:
                            expect_out = [4, 4, 1, 1, 2]
                            chk_output_running(output, expect_out, options)
                else:
                    if pre_vm_state == "shut off":
                        expect_out = [4, 1]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        expect_out = [4, 4, 1, 1, 1]
                        chk_output_running(output, expect_out, options)

    finally:
        # Recover env
        reset_env(vm_name, xml_file)
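reset_env() is called in the finally blocks but never defined in these listings. A plausible reconstruction from the inline "Recover env" steps of the run_virsh_vcpucount example further up (destroy, undefine, re-define from the dumped XML, remove the dump); a sketch rather than the suite's exact code:

import os
from virttest import virsh

def reset_env(vm_name, xml_file):
    """Restore the domain from the XML dumped at test start."""
    virsh.destroy(vm_name, ignore_status=True)    # VM may already be off
    virsh.undefine(vm_name, ignore_status=True)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)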
Example No. 19
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception:
            # The <cpu> element or its <topology> may be absent; nothing to do.
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not cpu.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip(
                    '\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
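The XML checks above grep the raw dumpxml text with regexes such as r"vcpu.*enabled=.yes.*", which is fragile against attribute reordering. The same question can be answered structurally; a small standard-library sketch (the helper name is hypothetical):

import xml.etree.ElementTree as ET

def enabled_vcpu_ids(dump_xml):
    """Return the ids of <vcpu enabled='yes'> entries under <vcpus>."""
    root = ET.fromstring(dump_xml)  # root is the <domain> element
    return [v.get("id") for v in root.findall("./vcpus/vcpu")
            if v.get("enabled") == "yes"]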
Example No. 20
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """
    def affinity_from_vcpuinfo(domname, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(domname).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(domname, vcpu, cpu_list, pid):
        """
        This function checks the actual and the expected affinity of the given
        vcpu and raises an error if they do not match

        :param domname:  VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid; None skips the proc entry check
        """

        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, host_cpu_count)
        actual_output = affinity_from_vcpuinfo(domname, vcpu)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s'not "
                                 "succeeded, cpu pinning details not "
                                 "updated properly in virsh vcpuinfo "
                                 "command output" % (vm_name, vcpu, cpu_list))

        if pid is None:
            return
        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output, host_cpu_count)

        if expected_output == actual_output_proc:
            logging.info(
                "successfully pinned cpu: %s --> vcpu: %s"
                " in respective proc entry", cpu_list, vcpu)
        else:
            raise error.TestFail(
                "Command 'virsh vcpupin %s %s %s'not "
                "succeeded cpu pinning details not "
                "updated properly in /proc/%s/task/%s/status" %
                (vm_name, vcpu, cpu_list, pid, vcpu_pid))

    def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
        """
        Run the vcpupin command and then check the result.
        """
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                if options == "--config":
                    virsh.destroy(vm_name)
                    pid = None
                # Check the result of vcpupin command.
                check_vcpupin(vm_name, vcpu, cpu_list, pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    pid = vm.get_pid()
    # Get the variables for vcpupin command.
    args = params.get("vcpupin_args", "dom_name")
    if args == "dom_name":
        args = vm_name
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Run cases when guest is shutoff.
    if vm.is_dead() and (params.get("start_vm") == "no"):
        run_and_check_vcpupin(args, 0, 0, "", 0)
        return
    # Get the host cpu count
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case "
                                "for the cpu_list=%s. But current number of "
                                "cpu on host is %s." %
                                (cpu_list, host_cpu_count))

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       "--live --active").stdout.strip()

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        if cpu_list == "x":
            for cpu in range(int(host_cpu_count)):
                run_and_check_vcpupin(args, vcpu, str(cpu), options, pid)
        else:
            cpu_max = int(host_cpu_count) - 1
            if cpu_list == "x-y":
                cpus = "0-%s" % cpu_max
            elif cpu_list == "x,y":
                cpus = "0,%s" % cpu_max
            elif cpu_list == "x-y,^z":
                cpus = "0-%s,^%s" % (cpu_max, cpu_max)
            elif cpu_list == "r":
                cpus = "r"
            elif cpu_list == "-1":
                cpus = "-1"
            elif cpu_list == "out_of_max":
                cpus = str(cpu_max + 1)
            else:
                raise error.TestNAError("Cpu_list=%s is not recognized." %
                                        cpu_list)
            run_and_check_vcpupin(args, vcpu, cpus, options, pid)
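The cpu_list parameter above exercises virsh's cpulist syntax: plain numbers, ranges ("x-y"), comma lists ("x,y") and "^" exclusions ("x-y,^z"), plus deliberately invalid forms ("r", "-1", out-of-range). For the valid forms, the expansion works as in this illustrative helper (the invalid forms are the negative cases and are intentionally not handled):

def expand_cpulist(cpulist):
    """Expand a virsh cpulist string like "0-3,^2" into a sorted cpu list."""
    include, exclude = set(), set()
    for part in cpulist.split(","):
        target = exclude if part.startswith("^") else include
        part = part.lstrip("^")
        if "-" in part:
            low, high = part.split("-")
            target.update(range(int(low), int(high) + 1))
        else:
            target.add(int(part))
    return sorted(include - exclude)

assert expand_cpulist("0-3,^2") == [0, 1, 3]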
Example No. 21
    # Prepare domain
    try:
        reset_domain(vm, pre_vm_state, (options == "--guest"))
    except Exception as details:
        reset_env(vm_name, xml_file)
        raise error.TestFail(details)

    # Perform guest vcpu hotplug
    for i in range(len(set_option)):
        # Hotplug domain vcpu
        result = virsh.setvcpus(vm_name, 2, set_option[i], ignore_status=True,
                                debug=True)
        setvcpus_status = result.exit_status

        # Call virsh vcpucount with option
        result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                 debug=True)
        output = result.stdout.strip()
        vcpucount_status = result.exit_status

        if "--guest" in options:
            if result.stderr.count("doesn't support option") or \
               result.stderr.count("command guest-get-vcpus has not been found"):
                reset_env(vm_name, xml_file)
                raise error.TestNAError("Option %s is not supported" % options)

        # Reset domain
        reset_domain(vm, pre_vm_state)

        # Check result
        if status_error == "yes":
            if vcpucount_status == 0:
Example No. 22
def run(test, params, env):
    """
    Test command: virsh edit.

    The command can edit XML configuration for a domain
    1.Prepare test environment,destroy or suspend a VM.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh edit operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config --maximum")
    if vcpucount_result.exit_status:
        # Fail back to libvirt_xml way to test vcpucount.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        original_vcpu = str(vmxml.vcpu)
    else:
        original_vcpu = vcpucount_result.stdout.strip()

    expected_vcpu = str(int(original_vcpu) + 1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        :param source : virsh edit's option.
        :param edit_cmd : an edit command line.
        :return: True if the edit succeeded, False if it failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh -c %s edit %s" % (vm.connect_uri, source))
            session.sendline(edit_cmd)
            session.send('\x1b')
            session.send('ZZ')
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source : virsh edit's option.
        :param guest_name : vm's name.
        :return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vcpus = str(vmxml.vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except Exception:
        status = False

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example No. 23
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of the given
        vcpu and raises an error if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        :param vcpu_pid: VM vcpu task pid
        """

        total_cpu = utils.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l").stdout
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expecte affinity: %s", expected_output)
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " virsh vcpuinfo command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " /proc/%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                raise error.TestError("Need more than one domains")
            if not vm2:
                raise error.TestNAError("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_cpu_count = utils.count_cpus()
        cpu_max = int(host_cpu_count) - 1
        if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
            raise error.TestNAError("We need more cpus on host in this case "
                                    "for the cpu_list=%s. But current number "
                                    "of cpu on host is %s."
                                    % (cpu_list, host_cpu_count))

        # Find the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("Active cpus in host are %s", cpus_list)

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
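                # Translate the symbolic cpu_list value from the config
                # into a concrete virsh cpulist string; "r", "-1" and
                # "out_of_max" are invalid inputs that should fail.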
                if cpu_list == "x-y":
                    cpus = "0-%s" % cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (cpu_max, cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    raise error.TestNAError("Cpu_list=%s is not recognized."
                                            % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
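
The cpus strings built above follow virsh's cpulist syntax: plain cpu
numbers, ranges such as "0-3", comma-separated combinations, and "^N"
exclusions. Below is a minimal sketch of expanding such a string into a
cpu set; expand_cpulist is a hypothetical helper for illustration (virsh
parses this syntax itself), and the sketch assumes exclusions appear
after the ranges they modify.

    def expand_cpulist(spec):
        # Hypothetical helper: expand a virsh-style cpulist such as
        # "0-3,^2" into the set of selected host cpus.
        allowed = set()
        for part in spec.split(','):
            if part.startswith('^'):
                allowed.discard(int(part[1:]))
            elif '-' in part:
                start, end = part.split('-')
                allowed.update(range(int(start), int(end) + 1))
            else:
                allowed.add(int(part))
        return allowed

    print(expand_cpulist("0-3,^2"))   # {0, 1, 3}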
Example #24
0
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    (4) TODO: Right now the test case covers pinning one cpu at a time;
              this could be improved by pinning a random set of cpus
    """

    def build_actual_info(domname, vcpu):
        """
        This function returns the list of the given vcpu's affinity flags
        parsed from the virsh vcpuinfo output

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(domname)
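        # Pick out the per-vcpu affinity strings (runs of 'y'/'-'); the
        # character class skips the "Affinity:" label and typically
        # captures the preceding space, hence the lstrip() below.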
        cmd = re.findall('[^Affinity:][-y]+', str(output))
        total_affinity = cmd[vcpu].lstrip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def build_expected_info(cpu):
        """
        This function builds and returns the vcpu's expected affinity list

        :param cpu: cpu details for the affinity
        """

        expected_affinity = []

        for i in range(int(host_cpu_count)):
            if cpu == i:
                expected_affinity.append('y')
            else:
                expected_affinity.append('-')

        expected_affinity_proc = int(math.pow(2, cpu))
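        # i.e. the Cpus_allowed bitmask from /proc/<pid>/task/<tid>/status:
        # pinning to host cpu N yields mask 2**N (e.g. cpu 3 -> 0x8).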
        return expected_affinity, expected_affinity_proc

    def virsh_check_vcpupin(domname, vcpu, cpu, pid):
        """
        This function checks the actual and the expected affinity of the
        given vcpu and raises an error if they do not match.

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu: cpu details for the affinity
        :param pid: pid of the qemu process backing the VM
        """

        expected_output, expected_output_proc = build_expected_info(cpu)
        actual_output = build_actual_info(domname, vcpu)

        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]

        # Get the actual cpu affinity mask (hex Cpus_allowed value) from
        # the proc entry
        output = utils.cpu_affinity_by_task(pid, vcpu_pid)
        actual_output_proc = int(output, 16)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu: %s --> vcpu: %s", cpu, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s'not succeeded"
                                 ", cpu pinning details not updated properly in"
                                 " virsh vcpuinfo command output"
                                 % (vm_name, vcpu, cpu))

        if expected_output_proc == actual_output_proc:
            logging.info("successfully pinned cpu: %s --> vcpu: %s"
                         " in respective proc entry", cpu, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s'not succeeded"
                                 " cpu pinning details not updated properly in"
                                 " /proc/%s/task/%s/status"
                                 % (vm_name, vcpu, cpu, pid, vcpu_pid))

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Get the host cpu count
    host_cpu_count = utils.count_cpus()

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       "--live --active").stdout.strip()

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        for cpu in range(int(host_cpu_count)):
            vm.vcpupin(vcpu, cpu)
            virsh_check_vcpupin(vm_name, vcpu, cpu, pid)
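
As a worked check of the mask arithmetic in build_expected_info above, here
is a minimal standalone sketch, assuming a hypothetical host with 4 cpus.

    def expected_info(cpu, host_cpu_count=4):
        # 'y' marks an allowed cpu in virsh vcpuinfo's affinity string.
        affinity = ['y' if i == cpu else '-' for i in range(host_cpu_count)]
        return affinity, 1 << cpu   # bitmask form, as in /proc Cpus_allowed

    print(expected_info(2))   # (['-', '-', 'y', '-'], 4)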