def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml

    :param test: avocado test object (used for cancel()/fail())
    :param params: dict-like test parameters
    :param env: test environment providing the VM object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")

    # Install cgroup utils; the package name differs on Debian/Ubuntu.
    # NOTE: platform.dist() was removed in Python 3.8 -- fall back to an
    # empty distro name there so the default (RPM) package name is kept.
    cgutils = "libcgroup-tools"
    dist_name = getattr(platform, "dist", lambda: ("",))()[0].lower()
    if "ubuntu" in dist_name:
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")

    # Backup domain XML so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Make sure the log file exists before libvirtd writes to it
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            config.log_outputs = "1:file:%s" % config_path
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement, current and max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            # Build a fresh dict per vcpu (avoids the copy/reset dance)
            vcpu = {'id': str(vcpu_id)}
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            vcpu['hotpluggable'] = ('yes' if str(vcpu_id) in hotplug_list
                                    else 'no')
            vcpu_list.append(vcpu)

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as details:
            # A missing <cpu>/<topology> element is acceptable here;
            # log it instead of silently swallowing every error.
            logging.debug("Ignoring topology cleanup error: %s", details)

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            # Negative case: the start is expected to fail with err_msg
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain to be fully booted
            vm.wait_for_login()

            if enable_vcpu:
                virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                              ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # BUGFIX: keyword was misspelled "ingnore_status", so a
                # failed disable was silently ignored instead of raising.
                virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                              ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                               debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                virsh.setvcpus(vm_name, config_vcpus, "--config",
                               ignore_status=False, debug=True)

            # Check QEMU command line for the expected maxcpus value
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log for the hotplug monitor commands
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml, kept for comparison after the libvirtd restart below
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu number seen inside the guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd and verify the vcpu XML survives unchanged
            libvirtd.restart()

            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info for every enabled vcpu
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for en_vcpu in en_vcpu_list:
                # Pull the id value out of: vcpu id='N' enabled='yes' ...
                vcpu_id = en_vcpu.split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration; guard against early failures
        # where `config` was never created (avoids NameError in cleanup).
        if config_libvirtd and 'config' in locals():
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml

    :param test: avocado test object (used for fail())
    :param params: dict-like test parameters
    :param env: test environment providing the VM object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Make sure the log file exists before libvirtd writes to it
            with open(config_path, 'a'):
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters":
                "\"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement, current and max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            # Build a fresh dict per vcpu (avoids the copy/reset dance)
            vcpu = {'id': str(vcpu_id)}
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            vcpu['hotpluggable'] = ('yes' if str(vcpu_id) in hotplug_list
                                    else 'no')
            vcpu_list.append(vcpu)

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as details:
            # A missing <cpu>/<topology> element is acceptable here;
            # log it instead of silently swallowing every error.
            logging.debug("Ignoring topology cleanup error: %s", details)

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))
        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            # Negative case: the start is expected to fail with err_msg.
            # NOTE(review): err_msg only makes sense together with
            # start_vm_after_config=yes -- `ret` is undefined otherwise.
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain to be fully booted
                vm.wait_for_login()

            if enable_vcpu:
                virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                              ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # BUGFIX: keyword was misspelled "ingnore_status", so a
                # failed disable was silently ignored instead of raising.
                virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                              ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                               debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                virsh.setvcpus(vm_name, config_vcpus, "--config",
                               ignore_status=False, debug=True)

        # Check QEMU command line for the expected maxcpus value
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" %
                       (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log for the hotplug monitor commands
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml, kept for comparison after the libvirtd restart below
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count: a running VM reports both the config
            # and the live maximum; a shut-off VM only the config one.
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max,
                                  output)
            expect_num = 2 if start_vm_after_config else 1
            if len(max_list) != expect_num:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            # BUGFIX: int(config_vcpus) raised ValueError when the VM was
            # not started and set_config_vcpus was empty; fall back to the
            # current vcpu count in that case.
            if start_vm_after_config:
                expect_num = vcpus_crt
            else:
                expect_num = int(config_vcpus) if config_vcpus else vcpus_crt
            if len(vcpu_lines) != expect_num:
                test.fail("vcpuinfo is not correct.")

            # Check cpu number seen inside the guest
            if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                    vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

            # Restart libvirtd and verify the vcpu XML survives unchanged
            libvirtd.restart()
            if config_vcpus and not start_vm_after_config:
                check_vm_exist(test, vm_name, 'shut off')
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info for every enabled vcpu (cgroup v1/v2 aware)
            if start_vm_after_config:
                en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
                for en_vcpu in en_vcpu_list:
                    # Pull the id value out of: vcpu id='N' enabled='yes' ...
                    vcpu_id = en_vcpu.split("=")[1].split()[0].strip('\'')
                    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                    cg_path = cg_obj.get_cgroup_path("cpuset")
                    if cg_obj.is_cgroup_v2_enabled():
                        vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                    else:
                        vcpu_path = os.path.join(cg_path,
                                                 "../vcpu%s" % vcpu_id)
                    if not os.path.exists(vcpu_path):
                        test.fail(
                            "Failed to find the enabled vcpu{} in {}.".format(
                                vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration; guard against early failures
        # where `daemon_conf` was never created.
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)
            # Also drop the temporary log file we created above
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
# --- Example #3 (0 votes) --- scraped page separator, not part of the code
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml

    :param test: avocado test object (used for cancel()/fail())
    :param params: dict-like test parameters
    :param env: test environment providing the VM object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")

    # Install cgroup utils; the package name differs on Debian/Ubuntu.
    # NOTE: platform.dist() was removed in Python 3.8 -- fall back to an
    # empty distro name there so the default (RPM) package name is kept.
    cgutils = "libcgroup-tools"
    dist_name = getattr(platform, "dist", lambda: ("",))()[0].lower()
    if "ubuntu" in dist_name:
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")

    # Backup domain XML so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Make sure the log file exists before libvirtd writes to it
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            config.log_outputs = "1:file:%s" % config_path
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement, current and max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            # Build a fresh dict per vcpu (avoids the copy/reset dance)
            vcpu = {'id': str(vcpu_id)}
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            vcpu['hotpluggable'] = ('yes' if str(vcpu_id) in hotplug_list
                                    else 'no')
            vcpu_list.append(vcpu)

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as details:
            # A missing <cpu>/<topology> element is acceptable here;
            # log it instead of silently swallowing every error.
            logging.debug("Ignoring topology cleanup error: %s", details)

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            # Negative case: the start is expected to fail with err_msg
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain to be fully booted
            vm.wait_for_login()

            if enable_vcpu:
                virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                              ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # BUGFIX: keyword was misspelled "ingnore_status", so a
                # failed disable was silently ignored instead of raising.
                virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                              ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                               debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                virsh.setvcpus(vm_name, config_vcpus, "--config",
                               ignore_status=False, debug=True)

            # Check QEMU command line for the expected maxcpus value
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log for the hotplug monitor commands
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml, kept for comparison after the libvirtd restart below
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu number seen inside the guest
            if not cpu.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd and verify the vcpu XML survives unchanged
            libvirtd.restart()

            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info for every enabled vcpu
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for en_vcpu in en_vcpu_list:
                # Pull the id value out of: vcpu id='N' enabled='yes' ...
                vcpu_id = en_vcpu.split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration; guard against early failures
        # where `config` was never created (avoids NameError in cleanup).
        if config_libvirtd and 'config' in locals():
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
# --- Example #4 (0 votes) --- scraped page separator, not part of the code
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
        4. check the vcpu order when hot plug/unplug specific vcpu
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error
        3. enable/disable vcpu0 when vm is active/inactive and check error

    :param test: test object used to report fail/cancel
    :param params: test parameters from the cfg file
    :param env: test environment holding the VM objects
    """
    # Local import keeps this fix self-contained: ast.literal_eval safely
    # parses the python-literal cfg values below, unlike eval() which would
    # execute arbitrary code from the configuration file.
    import ast

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    # Parse set/dict/tuple literals without executing code (consistent with
    # the other setvcpu tests in this file that use ast.literal_eval).
    vcpus_enabled = ast.literal_eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = ast.literal_eval(params.get("vcpus_hotpluggable", "{0}"))
    setvcpu_option = ast.literal_eval(params.get("setvcpu_option", "{}"))
    setvcpu_action = params.get("setvcpu_action", "")
    start_timeout = int(params.get("start_timeout", "60"))
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    # Back up the domain XML so it can be restored in the finally block.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_status(cpulist, cpu_option, cpus_online_pre=1):
        """
        test fail if the vcpu status from xml or the number of online vcpu from vm
        is not expected

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param cpus_online_pre: number of online vcpu before running setvcpu
        """
        # --config operations are only visible in the inactive xml.
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes":
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no":
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            # BUGFIX: the parameter used to be named vcpus_online_pre while
            # this line read cpus_online_pre, which silently resolved to the
            # closure variable of run() instead of the passed argument.
            if not cpu.check_if_vm_vcpu_match(cpu_count + cpus_online_pre, vm):
                test.fail("vcpu status check fail")

    def get_vcpu_order(vmxml):
        """
        return a {vcpu:order} dict based on vcpus in xml

        :param vmxml: the instance of VMXML class
        :return: mapping of enabled vcpu id -> its integer order from the xml
        """
        vcpu_order = {}
        # only online (enabled) vcpus carry an order attribute
        for cpu_id in range(maxvcpu):
            if vmxml.vcpus.vcpu[cpu_id].get('enabled') == "yes":
                vcpu_order[cpu_id] = int(vmxml.vcpus.vcpu[cpu_id].get('order'))

        logging.debug("vcpu order based on vcpus in xml {}".format(vcpu_order))
        return vcpu_order.copy()

    def check_vcpu_order(cpulist, cpu_option, vmxml_pre):
        """
        check the value of vcpu order in xml. when the online vcpu changes,
        the order should be redefined.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu such as config, enable and live
        :param vmxml_pre: the instance of VMXML class before run setvcpu
        """
        # only one vcpu is valid in the live operation of setvcpu command
        if len(cpulist) == 1:
            vcpu = cpulist[0]
        else:
            test.fail("wrong vcpu value from cfg file")

        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # get vcpus order dict from previous xml
        order_pre = get_vcpu_order(vmxml_pre)
        # get vcpus order dict from updated xml
        order_new = get_vcpu_order(vmxml_new)

        # calculate the expected dict of vcpu order based on the previous one
        if "enable" in cpu_option:
            # a newly enabled vcpu is appended with the next order value
            order_expect = order_pre.copy()
            order_expect[vcpu] = len(order_pre) + 1
        elif "disable" in cpu_option:
            # orders above the removed vcpu's order shift down by one
            for vcpuid, order in order_pre.items():
                if order > order_pre[vcpu]:
                    order_pre[vcpuid] = order - 1
            order_pre.pop(vcpu)
            order_expect = order_pre.copy()
        else:
            # BUGFIX: guard against a malformed option leaving order_expect
            # unbound (previously a NameError on the comparison below)
            test.fail("wrong cpu_option %s from cfg file" % cpu_option)

        if order_expect != order_new:
            test.fail("vcpu order check fail")

    try:
        # define vcpu placement/count in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # define the individual <vcpus> elements in xml
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)
            vcpu['enabled'] = 'yes' if vcpu_id in vcpus_enabled else 'no'
            vcpu['hotpluggable'] = 'yes' if vcpu_id in vcpus_hotplug else 'no'
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml

        # Remove cpu topology to avoid that it doesn't match vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        vmxml.sync()
        logging.debug(vmxml)

        # a plain string option is paired with the configured action
        if isinstance(setvcpu_option, str):
            setvcpu_option = {setvcpu_option: setvcpu_action}

        # run virsh setvcpu and check vcpus in xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name, cpus, option,
                                                debug=True)
                if not status_error:
                    cpulist = cpu.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option)

        # start vm
        if check.startswith("hotplug"):
            virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout)

        # turn a (key, value, key, value, ...) tuple into an ordered dict so
        # the setvcpu operations run in the order given in the cfg file
        if isinstance(setvcpu_option, tuple):
            if len(setvcpu_option) % 2:
                test.fail("test config fail")
            setvcpu_option = collections.OrderedDict(
                zip(setvcpu_option[::2], setvcpu_option[1::2]))

        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                # snapshot xml and online count before the operation so the
                # order/count checks can compare against them afterwards
                vmxml_pre = vm_xml.VMXML.new_from_dumpxml(vm_name)
                cpus_online_pre = vm.get_cpu_count()
                result_to_check = virsh.setvcpu(vm_name, cpus, option,
                                                debug=True)
                if not status_error:
                    cpulist = cpu.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option, cpus_online_pre)
                    # check vcpu order only when live status of vcpu is changed
                    if 'config' not in option:
                        check_vcpu_order(cpulist, option, vmxml_pre)

        # a setvcpu call was made above; validate its result/error message
        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        # restore the original domain xml
        vmxml_backup.sync()
Example #5
0
def run(test, params, env):
    """
    Test command: virsh setvcpu.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpu operation.
    3. Check in the following places
    vcpuinfo
    vcpupin
    vcpucount
    inside guest
    4.Recover test environment.
    5.Confirm the test result.

    :param test: test object used to report fail/cancel
    :param params: test parameters from the cfg file
    :param env: test environment holding the VM objects
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpu_pre_vm_state")
    options = params.get("setvcpu_options")
    vm_ref = params.get("setvcpu_vm_ref", "name")
    current_vcpu = int(params.get("setvcpu_current", "2"))
    vcpu_list_format = params.get("setvcpu_list_format", "comma")
    iteration = int(params.get("setvcpu_iteration", 1))
    invalid_vcpulist = params.get("invalid_vcpulist", "")
    convert_err = "Can't convert {0} to integer type"
    unsupport_str = params.get("unsupport_str", "")
    # NOTE(review): setvcpu_current is re-read here with a different default
    # ("1" vs "2" above); this second read is the one that takes effect.
    try:
        current_vcpu = int(params.get("setvcpu_current", "1"))
    except ValueError:
        test.cancel(convert_err.format(current_vcpu))

    try:
        max_vcpu = int(params.get("setvcpu_max", "4"))
    except ValueError:
        # NOTE(review): if int() raises here, max_vcpu was never bound, so
        # the format() call itself would raise NameError — confirm intended.
        test.cancel(convert_err.format(max_vcpu))

    extra_param = params.get("setvcpu_extra_param")
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = int(params.get("topology_sockets", '1'))
    cores = int(params.get("topology_cores", '4'))
    threads = int(params.get("topology_threads", '1'))
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    # NOTE(review): overrides the setvcpu_iteration value read above.
    iteration = int(params.get("hotplug_iteration", "1"))

    # Early death: cancel if this libvirt's setvcpu lacks any requested option
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("setvcpu", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)

    # Calculate count options
    vcpu_list = []
    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options, enabled=True):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpu options
        :param enabled: True or False base on enable or disable
        """
        # NOTE(review): the enabled branch matches bare "config"/"live"
        # substrings while the disabled branch matches "--config"/"--live";
        # for virsh-style option strings both match — confirm the asymmetry
        # is intentional.
        if enabled:
            if ("config" in options) or ("current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] += threads
            elif ("live" in options) or ("current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
        else:
            if ("--config" in options) or ("--current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] -= threads
            elif ("--live" in options) or ("--current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads

    # with multi-thread topology a whole core (threads vcpus) is plugged at
    # once, so plugging starts at the current vcpu boundary
    if threads > 1:
        start_vcpu = current_vcpu
    else:
        start_vcpu = current_vcpu + 1

    # NOTE(review): 'hypen' (sic) is the literal keyword expected from the
    # cfg file; do not "fix" the spelling without updating the cfg files.
    if 'hypen' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = "%s-%s" % (
                    str(vcpu_start), str(vcpu_start + threads - 1))
            else:
                lst = vcpu_start
            vcpu_list.append(lst)
    elif 'comma' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = ''
                for idx in range(vcpu_start, vcpu_start + threads):
                    lst += "%s," % idx
            else:
                lst = vcpu_start
            vcpu_list.append(lst.strip(','))
    else:
        pass

    # Early death: remote test needs real ip addresses configured
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Run test
    try:
        if vm.is_alive():
            vm.destroy()

        # Set cpu topology
        if set_topology:
            vmxml.set_vm_vcpus(vm.name, max_vcpu, current_vcpu,
                               sockets=sockets, cores=cores, threads=threads,
                               add_topology=True)

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        # put the VM into the requested pre-test state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        setvcpu_exit_status = 0
        setvcpu_exit_stderr = ''
        # TODO: Run remote test,for future use
        if vm_ref == "remote":
            pass
        # Run local test
        else:
            # resolve how the domain is addressed on the virsh command line
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpu_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpu_invalid_id") is not None:
                    dom_option = params.get("setvcpu_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpu_invalid_uuid") is not None:
                    dom_option = params.get("setvcpu_invalid_uuid")
            else:
                dom_option = vm_ref
            for itr in range(iteration):
                # pick the vcpu list argument for this iteration
                if extra_param:
                    vcpu_list_option = "%s %s" % (vcpu_list[itr], extra_param)
                elif invalid_vcpulist != "":
                    vcpu_list_option = invalid_vcpulist
                else:
                    vcpu_list_option = vcpu_list[itr]
                if 'enable' in options:
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-enable: %s\n" % (itr,
                                                                    status.stderr.strip())
                    set_expected(vm, options, True)
                elif 'disable' in options:
                    # disable needs a hotpluggable cpus, lets make sure we have
                    if status_error != "yes":
                        options_enable = options.replace("disable", "enable")
                        virsh.setvcpu(dom_option, vcpu_list_option,
                                      options_enable, ignore_status=False,
                                      debug=True)
                        # Adjust the expected vcpus
                        set_expected(vm, options, True)
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    # NOTE(review): overwrites any unsupport_str read from
                    # params at the top of this function.
                    unsupport_str = cpu.vcpuhotunplug_unsupport_str()
                    if unsupport_str and (unsupport_str in status.stderr):
                        test.cancel("Vcpu hotunplug is not supported in this host:"
                                    "\n%s" % status.stderr)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-disable: %s\n" % (itr,
                                                                     status.stderr.strip())
                    # Adjust the expected vcpus
                    set_expected(vm, options, False)
                # Handle error cases
                else:
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-error: %s\n" % (itr,
                                                                   status.stderr.strip())
            # Start VM after set vcpu
            if start_vm_after_set:
                # config/current changes become live values once the VM boots
                if "--enable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if "--disable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result)

            # Lets validate the result in positive cases
            if status_error != "yes":
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option=options)
    finally:
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command"
                      " stderr=%s" % setvcpu_exit_stderr)
        else:
            # NOTE(review): result is only bound on the positive local-test
            # path; the unreached remote path would raise NameError here.
            if not result:
                test.fail("Test Failed")
Example #6
0
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error

    :param test: test object used to report failures
    :param params: test parameters from the cfg file
    :param env: test environment holding the VM objects
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    # literal_eval safely parses the set/dict literals from the cfg file
    vcpus_enabled = ast.literal_eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = ast.literal_eval(params.get("vcpus_hotpluggable", "{0}"))
    setvcpu_option = ast.literal_eval(params.get("setvcpu_option", "{}"))
    start_timeout = int(params.get("start_timeout", "60"))
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    # back up the domain xml so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_status(cpus_online_pre=1):
        """
        test fail if the vcpu status from xml or the number of online vcpu from vm
        is not expected

        :param cpus_online_pre: number of online vcpu before running setvcpu

        """
        # NOTE: closes over setvcpu_option and check from run()

        # --config operations are only visible in the inactive xml
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpus, option in setvcpu_option.items():
            cpulist = libvirt.cpus_parser(cpus)
            for cpu_id in cpulist:
                if ("enable" in option):
                    cpu_count += 1
                    if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                        test.fail("vcpu status check fail")
                elif ("disable" in option):
                    cpu_count -= 1
                    if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                        test.fail("vcpu status check fail")
                else:
                    test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(
                    cpu_count + cpus_online_pre, vm):
                test.fail("vcpu status check fail")

    try:
        # define vcpu placement/count in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # define the individual <vcpus> elements in xml
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)

            if vcpu_id in vcpus_enabled:
                vcpu['enabled'] = 'yes'
            else:
                vcpu['enabled'] = 'no'

            if vcpu_id in vcpus_hotplug:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            # copy() because the same dict object is reused each iteration
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml
        vmxml.sync()
        logging.debug(vmxml)

        # run virsh setvcpu and check vcpus in xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                # NOTE(review): runs unconditionally here, while the hotplug
                # path below guards on status_error — confirm intended.
                check_vcpu_status()

        # start vm
        virsh.start(vm_name, debug=True, ignore_status=False)
        vm.wait_for_login(timeout=start_timeout)

        # snapshot the online vcpu count before hotplug operations
        cpus_online_pre = vm.get_cpu_count()
        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                if not status_error:
                    check_vcpu_status(cpus_online_pre)

        # result_to_check only exists if a setvcpu call was made above
        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        # restore the original domain xml
        vmxml_backup.sync()
Example #7
0
def run(test, params, env):
    """
    Test command: virsh setvcpu.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpu operation.
    3. Check in the following places
    vcpuinfo
    vcpupin
    vcpucount
    inside guest
    4.Recover test environment.
    5.Confirm the test result.

    :param test: test object used to report fail/cancel
    :param params: test parameters from the cfg file
    :param env: test environment holding the VM objects
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpu_pre_vm_state")
    options = params.get("setvcpu_options")
    vm_ref = params.get("setvcpu_vm_ref", "name")
    current_vcpu = int(params.get("setvcpu_current", "2"))
    vcpu_list_format = params.get("setvcpu_list_format", "comma")
    iteration = int(params.get("setvcpu_iteration", 1))
    invalid_vcpulist = params.get("invalid_vcpulist", "")
    convert_err = "Can't convert {0} to integer type"
    unsupport_str = params.get("unsupport_str", "")
    # NOTE(review): setvcpu_current is re-read here with a different default
    # ("1" vs "2" above); this second read is the one that takes effect.
    try:
        current_vcpu = int(params.get("setvcpu_current", "1"))
    except ValueError:
        test.cancel(convert_err.format(current_vcpu))

    try:
        max_vcpu = int(params.get("setvcpu_max", "4"))
    except ValueError:
        # NOTE(review): if int() raises here, max_vcpu was never bound, so
        # the format() call itself would raise NameError — confirm intended.
        test.cancel(convert_err.format(max_vcpu))

    extra_param = params.get("setvcpu_extra_param")
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = int(params.get("topology_sockets", '1'))
    cores = int(params.get("topology_cores", '4'))
    threads = int(params.get("topology_threads", '1'))
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    # NOTE(review): overrides the setvcpu_iteration value read above.
    iteration = int(params.get("hotplug_iteration", "1"))

    # Early death: cancel if this libvirt's setvcpu lacks any requested option
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("setvcpu", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)

    # Calculate count options
    vcpu_list = []
    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options, enabled=True):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpu options
        :param enabled: True or False base on enable or disable
        """
        # NOTE(review): the enabled branch matches bare "config"/"live"
        # substrings while the disabled branch matches "--config"/"--live";
        # for virsh-style option strings both match — confirm the asymmetry
        # is intentional.
        if enabled:
            if ("config" in options) or ("current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] += threads
            elif ("live" in options) or ("current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
        else:
            if ("--config" in options) or ("--current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] -= threads
            elif ("--live" in options) or ("--current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads

    # with multi-thread topology a whole core (threads vcpus) is plugged at
    # once, so plugging starts at the current vcpu boundary
    if threads > 1:
        start_vcpu = current_vcpu
    else:
        start_vcpu = current_vcpu + 1

    # NOTE(review): 'hypen' (sic) is the literal keyword expected from the
    # cfg file; do not "fix" the spelling without updating the cfg files.
    if 'hypen' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = "%s-%s" % (
                    str(vcpu_start), str(vcpu_start + threads - 1))
            else:
                lst = vcpu_start
            vcpu_list.append(lst)
    elif 'comma' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = ''
                for idx in range(vcpu_start, vcpu_start + threads):
                    lst += "%s," % idx
            else:
                lst = vcpu_start
            vcpu_list.append(lst.strip(','))
    else:
        pass

    # Early death: remote test needs real ip addresses configured
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Run test
    try:
        if vm.is_alive():
            vm.destroy()

        # Set cpu topology
        if set_topology:
            vmxml.set_vm_vcpus(vm.name, max_vcpu, current_vcpu,
                               sockets=sockets, cores=cores, threads=threads,
                               add_topology=True)

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        # put the VM into the requested pre-test state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        setvcpu_exit_status = 0
        setvcpu_exit_stderr = ''
        # TODO: Run remote test,for future use
        if vm_ref == "remote":
            pass
        # Run local test
        else:
            # resolve how the domain is addressed on the virsh command line
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpu_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpu_invalid_id") is not None:
                    dom_option = params.get("setvcpu_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpu_invalid_uuid") is not None:
                    dom_option = params.get("setvcpu_invalid_uuid")
            else:
                dom_option = vm_ref
            for itr in range(iteration):
                # pick the vcpu list argument for this iteration
                if extra_param:
                    vcpu_list_option = "%s %s" % (vcpu_list[itr], extra_param)
                elif invalid_vcpulist != "":
                    vcpu_list_option = invalid_vcpulist
                else:
                    vcpu_list_option = vcpu_list[itr]
                if 'enable' in options:
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-enable: %s\n" % (itr,
                                                                    status.stderr.strip())
                    set_expected(vm, options, True)
                elif 'disable' in options:
                    # disable needs a hotpluggable cpus, lets make sure we have
                    if status_error != "yes":
                        options_enable = options.replace("disable", "enable")
                        virsh.setvcpu(dom_option, vcpu_list_option,
                                      options_enable, ignore_status=False,
                                      debug=True)
                        # Adjust the expected vcpus
                        set_expected(vm, options, True)
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    # NOTE(review): overwrites any unsupport_str read from
                    # params at the top of this function.
                    unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                    if unsupport_str and (unsupport_str in status.stderr):
                        test.cancel("Vcpu hotunplug is not supported in this host:"
                                    "\n%s" % status.stderr)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-disable: %s\n" % (itr,
                                                                     status.stderr.strip())
                    # Adjust the expected vcpus
                    set_expected(vm, options, False)
                # Handle error cases
                else:
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-error: %s\n" % (itr,
                                                                   status.stderr.strip())
            # Start VM after set vcpu
            if start_vm_after_set:
                # config/current changes become live values once the VM boots
                if "--enable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if "--disable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result)

            # Lets validate the result in positive cases
            if status_error != "yes":
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option=options)
    finally:
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command"
                      " stderr=%s" % setvcpu_exit_stderr)
        else:
            # NOTE(review): result is only bound on the positive local-test
            # path; the unreached remote path would raise NameError here.
            if not result:
                test.fail("Test Failed")
Example #8
0
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
        4. check the vcpu order when hot plug/unplug specific vcpu
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error

    :param test: avocado test object (provides fail())
    :param params: test parameter dict from the cfg file
    :param env: test environment object holding the VM
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    # cfg values are python literals, e.g. "{0, 2}" -> set of vcpu ids
    vcpus_enabled = ast.literal_eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = ast.literal_eval(params.get("vcpus_hotpluggable", "{0}"))
    # mapping (or flat tuple, converted later) of cpu-list -> setvcpu options
    setvcpu_option = ast.literal_eval(params.get("setvcpu_option", "{}"))
    start_timeout = int(params.get("start_timeout", "60"))
    # scenario selector; values seen below: "coldplug", "hotplug", "hotplug*config"
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    # Snapshot the domain xml so the finally block can restore it
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    def check_vcpu_status(cpulist, cpu_option, vcpus_online_pre=1):
        """
        Fail the test if the vcpu state in the domain xml, or the number of
        online vcpus seen inside the guest, does not match the expectation.

        :param cpulist: list of vcpu ids affected by the setvcpu call
        :param cpu_option: option string passed to setvcpu (must contain
                           "enable" or "disable")
        :param vcpus_online_pre: number of online vcpus before setvcpu ran
        """
        # *config scenarios must be verified against the inactive xml,
        # everything else against the live xml.
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # Check the enabled= attribute of every touched vcpu and accumulate
        # the net change in the number of online vcpus.
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # For a live hotplug, also confirm the guest observes the new count.
        # Fix: use the vcpus_online_pre parameter; the original read the
        # enclosing scope's cpus_online_pre variable and ignored the
        # parameter (and its default) entirely.
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(cpu_count + vcpus_online_pre, vm):
                test.fail("vcpu status check fail")

    def get_vcpu_order(vmxml):
        """
        Build and return a {vcpu_id: order} dict from the vcpus element of
        the given xml, considering only the vcpus currently enabled.

        :param vmxml: the instance of VMXML class
        """
        # Only enabled vcpus carry a meaningful hotplug order value.
        order_map = {
            cpu_id: int(vmxml.vcpus.vcpu[cpu_id].get('order'))
            for cpu_id in range(maxvcpu)
            if vmxml.vcpus.vcpu[cpu_id].get('enabled') == "yes"
        }
        logging.debug("vcpu order based on vcpus in xml {}".format(order_map))
        return order_map

    def check_vcpu_order(cpulist, cpu_option, vmxml_pre):
        """
        check the value of vcpu order in xml. when the online vcpu changes,
        the order should be redefined.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu such as config, enable and live
        :param vmxml_pre: the instance of VMXML class before run setvcpu
        """
        # only one vcpu is valid in the live operation of setvcpu command
        if len(cpulist) == 1:
            vcpu = cpulist[0]
        else:
            test.fail("wrong vcpu value from cfg file")

        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # get vcpus order dict from previous xml
        order_pre = get_vcpu_order(vmxml_pre)
        # get vcpus order dict from updated xml
        order_new = get_vcpu_order(vmxml_new)

        # calculate the expected dict of vcpu order based on the previous one
        if "enable" in cpu_option:
            # a newly enabled vcpu is appended with the next order value
            order_expect = order_pre.copy()
            order_expect[vcpu] = len(order_pre) + 1
        elif "disable" in cpu_option:
            # disabling a vcpu shifts every higher order down by one;
            # the disabled vcpu's own entry never changes (order is never
            # greater than itself), so order_pre[vcpu] stays stable here
            for vcpuid, order in order_pre.items():
                if order > order_pre[vcpu]:
                    order_pre[vcpuid] = order - 1
            order_pre.pop(vcpu)
            order_expect = order_pre.copy()
        else:
            # Fix: the original fell through with order_expect unbound and
            # raised NameError below; fail with a clear message instead.
            test.fail("unexpected setvcpu option: %s" % cpu_option)

        if order_expect != order_new:
            test.fail("vcpu order check fail")

    try:
        # define vcpu topology in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # build the <vcpus> element: every vcpu id gets explicit
        # enabled/hotpluggable attributes derived from the cfg sets
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)

            if vcpu_id in vcpus_enabled:
                vcpu['enabled'] = 'yes'
            else:
                vcpu['enabled'] = 'no'

            if vcpu_id in vcpus_hotplug:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            # copy() so every list entry is an independent dict
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml
        vmxml.sync()
        logging.debug(vmxml)

        # run virsh setvcpu on the inactive domain and check the cold xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name, cpus, option, debug=True)
                cpulist = libvirt.cpus_parser(cpus)
                check_vcpu_status(cpulist, option)

        # start vm
        virsh.start(vm_name, debug=True, ignore_status=False)
        vm.wait_for_login(timeout=start_timeout)

        # a tuple in the cfg marks the setvcpu calls as order-sensitive:
        # convert the flat (cpus, option, cpus, option, ...) tuple into an
        # OrderedDict so iteration preserves the configured sequence
        if isinstance(setvcpu_option, tuple):
            d = collections.OrderedDict()
            length = len(setvcpu_option)
            if (length % 2):
                test.fail("test config fail")
            for i in range(length):
                if not (i % 2):
                    d[setvcpu_option[i]] = setvcpu_option[i+1]
            # Fix: dropped the dead `setvcpu_option = collections.OrderedDict()`
            # assignment that was immediately overwritten by this one.
            setvcpu_option = d.copy()

        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                # snapshot xml and online cpu count before each setvcpu call
                vmxml_pre = vm_xml.VMXML.new_from_dumpxml(vm_name)
                cpus_online_pre = vm.get_cpu_count()
                result_to_check = virsh.setvcpu(vm_name, cpus, option, debug=True)
                if not status_error:
                    cpulist = libvirt.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option, cpus_online_pre)
                    # check vcpu order only when live status of vcpu is changed
                    if 'config' not in option:
                        check_vcpu_order(cpulist, option, vmxml_pre)

        # only validate the result when a setvcpu call actually ran
        # (neither branch above may have executed for some cfg variants)
        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        # always restore the original domain definition
        vmxml_backup.sync()