Example #1
    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity against the one obtained from the running VM
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")
Example #2
def test_vcpupin_current_inactive_vm(test, vm, cpu_max_id, params):
    """
    Test case for executing vcpupin --current with shutoff vm

    :param test: test object
    :param vm: vm object
    :param cpu_max_id: maximum id of host cpu id
    :param params: test parameters
    """
    logging.debug("Step 1: Destory vm if any")
    if vm.is_alive():
        vm.destroy()

    logging.debug("Step 2: execute virsh vcpupin --current "
                  "and return expected new vcpupin values")
    vcpupin_conf = ast.literal_eval(params.get("vcpupin_conf"))
    vcpupin_new_values = get_expected_vcpupin(vm.name,
                                              vcpupin_conf,
                                              cpu_max_id,
                                              vcpupin_option='--current')

    logging.debug("Step 3: check the vcpupin output with no "
                  "option is aligned with the new vcpupin values")
    compare_2_dicts(test, get_vcpupin_dict(vm.name), vcpupin_new_values)

    logging.debug("Step 4: start vm")
    vm.start()
    vm.wait_for_login().close()

    logging.debug("Step 5: check vcpuinfo affinity is aligned "
                  "with new vcpupin values")
    vcpu_max = params.get('vcpu_max', '4')
    vcpu_current = params.get('vcpu_current', '2')
    # Replace the max cpu id in the pattern
    affinity_pattern = params.get('affinity_pattern')
    output = virsh.vcpuinfo(vm.name,
                            options='--pretty',
                            debug=True,
                            ignore_status=False).stdout.rstrip()
    affinity = re.findall(affinity_pattern, output)
    if not affinity or len(affinity) != int(vcpu_current):
        test.fail("%s vcpu info with affinity is expected, "
                  "but %s found:%s" % (vcpu_current, len(affinity), affinity))
    check_vcpuinfo_affinity(test, affinity, vcpupin_new_values)

    logging.debug("Step 6: hotplug vcpu")
    virsh.setvcpus(vm.name, str(vcpu_max), ignore_status=False, debug=True)

    logging.debug("Step 7: check vcpuinfo affinity is changed "
                  "and aligned with new vcpupin values")
    output = virsh.vcpuinfo(vm.name,
                            options='--pretty',
                            debug=True,
                            ignore_status=False).stdout.rstrip()
    affinity = re.findall(affinity_pattern, output)
    if not affinity or len(affinity) != int(vcpu_max):
        test.fail("%s vcpu info with affinity is expected, "
                  "but %s found:%s" % (vcpu_max, len(affinity), affinity))
    check_vcpuinfo_affinity(test, affinity, vcpupin_new_values)
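
The get_vcpupin_dict() and compare_2_dicts() helpers are assumed by the test above. A hedged sketch of what get_vcpupin_dict() plausibly does: parse the "virsh vcpupin <vm>" table into a {vcpu: cpuset} dict. The exact helper in the test module may differ.

import re

from virttest import virsh

def get_vcpupin_dict(vm_name, options=''):
    """Parse 'virsh vcpupin <vm>' lines like ' 0   1-3' into {'0': '1-3'}."""
    pin_dict = {}
    output = virsh.vcpupin(vm_name, options=options, debug=True,
                           ignore_status=False).stdout_text.strip()
    for line in output.splitlines():
        match = re.match(r'^\s*(\d+)\s+(\S+)\s*$', line)
        if match:
            pin_dict[match.group(1)] = match.group(2)
    return pin_dict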
Example #3
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        host_cpu_count = utils.count_cpus()
        for i in range(host_cpu_count):
            ret = virsh.vcpupin(vm_name, 0, i, debug=True, ignore_status=True)
            if ret.exit_status:
                raise error.TestFail("vcpupin failed while numad running, %s"
                                     % bug_url)
            virsh.vcpuinfo(vm_name, debug=True)
    finally:
        utils.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
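
For reference, a minimal stand-in for the cpus_parser() call used above, assuming only the plain "a-b" range and "^n" exclusion forms (avocado-vt's real parser accepts more input shapes):

def cpus_parser(cpus_string):
    """Expand a nodeset/cpuset string such as "0-2,^1" into [0, 2]."""
    include, exclude = set(), set()
    for part in str(cpus_string).split(','):
        target = exclude if part.startswith('^') else include
        part = part.lstrip('^')
        if '-' in part:
            start, end = part.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(part))
    return sorted(include - exclude)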
Example #4
    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        # check the expected vcpu affinity against the one obtained from the running VM
        if not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")
Example #5
def cpu_affinity_check(test, vm_name, cpuset=None, node=None):
    """
    Check vcpuinfo cpu affinity

    :param test: test object
    :param vm_name: the vm name
    :param cpuset: cpuset list
    :param node: node number list
    :raises: test.fail if cpu affinity is not expected
    """

    result = virsh.vcpuinfo(vm_name, debug=True)
    output = result.stdout.strip().splitlines()[-1]
    cpu_affinity = output.split(":")[-1].strip()
    if node:
        tmp_list = []
        for node_num in node:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]
    if cpuset:
        cpu_list = cpuset
    ret = format_affinity_str(cpu_list)
    logging.debug("expect cpu affinity is %s", ret)
    if cpu_affinity != ret:
        test.fail("vcpuinfo cpu affinity not expected")
Example #6
    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        # check the expected vcpu affinity against the one obtained from the running VM
        elif not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")
Example #7
    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        # check the expected vcpu affinity against the one obtained from the running VM
        elif not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")
Example #8
    def build_actual_info(domname, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        @param: domname: VM Name to operate on
        @param: vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(domname).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[vcpu].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity
Example #9
    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity
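
The same regex can be exercised standalone against canned vcpuinfo output, which makes the parsing contract explicit:

import re

sample = """VCPU:           0
CPU:            1
State:          running
CPU Affinity:   yy--

VCPU:           1
CPU:            3
State:          running
CPU Affinity:   --yy"""

matches = re.findall('CPU Affinity: +[-y]+', sample)
assert matches[0].split()[-1] == 'yy--'
assert list(matches[1].split()[-1]) == ['-', '-', 'y', 'y']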
Example #11
def affinity_from_vcpuinfo(vm):
    """
    Returns list of the vcpu's affinity from
    virsh vcpuinfo output

    :param vm: VM object

    :return: affinity list of VM
    """
    output = virsh.vcpuinfo(vm.name).stdout_text.rstrip()
    affinity = re.findall('CPU Affinity: +[-y]+', output)
    total_affinity = [list(vcpu_affinity.split()[-1].strip())
                      for vcpu_affinity in affinity]
    return total_affinity
Example #13
def affinity_from_vcpuinfo(vm):
    """
    Returns list of the vcpu's affinity from
    virsh vcpuinfo output

    :param vm: VM object

    :return: affinity list of VM
    """
    output = results_stdout_52lts(virsh.vcpuinfo(vm.name)).rstrip()
    affinity = re.findall('CPU Affinity: +[-y]+', output)
    total_affinity = [list(vcpu_affinity.split()[-1].strip())
                      for vcpu_affinity in affinity]
    return total_affinity
Example #14
    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num+1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            raise error.TestFail("vcpuinfo cpu affinity not expected")
Example #15
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils (platform.dist() is gone in Python 3.8+;
    # avocado.utils.distro is assumed to be imported as distro)
    cgutils = "libcgroup-tools"
    if "ubuntu" in distro.detect().name.lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
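
check_if_vm_vcpu_match() above compares the guest's own view of its CPUs with the expected count. A rough sketch of that check, assuming an nproc-capable guest (the real utils_misc helper also covers hotplug corner cases):

import logging

def check_if_vm_vcpu_match(vcpu_desire, vm):
    """Return True if the guest reports the expected number of online CPUs."""
    session = vm.wait_for_login()
    try:
        vcpu_actual = int(session.cmd_output('nproc').strip())
    finally:
        session.close()
    if vcpu_actual != int(vcpu_desire):
        logging.debug("vcpu mismatch: expected %s, guest reports %s",
                      vcpu_desire, vcpu_actual)
        return False
    return True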
Example #16
def run(test, params, env):
    """
    Test command: virsh vcpuinfo.

    The command can get domain vcpu information
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform virsh vcpuinfo operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    vm_ref = params.get("vcpuinfo_vm_ref")
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    def remote_case(params, vm_name):
        """
        Test remote case.
        """
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        local_pwd = params.get("local_pwd", None)
        # Used for connecting from remote to local
        connect_uri = params.get("remote_connect_uri",
                                 "qemu+ssh://LOCAL.EXAMPLE.COM/system")
        # Verify connect_uri/remote_ip/local_ip is useful for this test.
        if ("EXAMPLE" in remote_ip or "EXAMPLE" in connect_uri
                or "EXAMPLE" in local_ip):
            test.cancel("Please set remote_ip or connect_uri or local_ip.")

        status = 0
        output = ""
        err = ""
        try:
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)
            vcback = virsh.VirshConnectBack(remote_ip=remote_ip,
                                            remote_pwd=remote_pwd,
                                            uri=connect_uri,
                                            debug=True,
                                            ignore_status=True)
            cmdresult = vcback.vcpuinfo(vm_name)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            vcback.close_session()
            if status != 0:
                err = output
        except process.CmdError:
            status = 1
            output = ""
            err = "remote test failed"
        # Maintain result format conformance with local test
        return status, output, err

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("vcpuinfo_extra"))

    if vm_ref == "remote":
        # Keep status_error check conditions (below) simple
        status, output, err = remote_case(params, vm_name)
    else:
        result = virsh.vcpuinfo(vm_ref)
        status = result.exit_status
        output = result.stdout.strip()
        err = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if not status:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.debug(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed")
            else:
                logging.debug(result)
                test.fail("Run successfully with wrong command!")
        # Check the error message in negative case.
        if not err and not libvirt_version.version_compare(5, 6, 0):
            logging.debug(result)
            logging.debug(
                "Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=889276 "
                "is helpful for tracing this bug.")
            test.fail("No error message for a command error!")
    elif status_error == "no":
        if status:
            logging.debug(result)
            test.fail("Run failed with right command")
Example #17
                if output.splitlines()[i].split()[-1] != expect_vcpu_num[j]:
                    raise error.TestFail(
                        "Virsh vcpucount output is unexpected")
            except IndexError as detail:
                raise error.TestFail(detail)
    logging.debug("Command vcpucount check pass")

    # Check virsh vcpuinfo output, (1) count vcpu number, if domain is
    # alive, vcpu number(current) correspond to expect_vcpu_num[3],
    # otherwise, it correspond to expect_vcpu_num[2]; (2) get cpus affinity,
    # and check them in virsh vcpupin command
    if vm.is_alive():
        i = 3
    else:
        i = 2
    result = virsh.vcpuinfo(vm.name, ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    output = result.stdout.strip()
    vcpuinfo_num = len(output.split("\n\n"))
    logging.debug("Get %s vcpus in virsh vcpuinfo output", vcpuinfo_num)
    if vcpuinfo_num != int(expect_vcpu_num[i]):
        raise error.TestFail("Vcpu number in virsh vcpuinfo is unexpected")
    vcpuinfo_affinity = re.findall('CPU Affinity: +([-y]+)', output)
    logging.debug("Command vcpuinfo check pass")

    # Check vcpu number in domain XML, if setvcpu with '--config' option,
    # or domain is dead, vcpu number correspond to expect_vcpu_num[2],
    # otherwise, it correspond to expect_vcpu_num[3]
    dumpxml_option = ""
    if setvcpu_option == "--config" or vm.is_dead():
        dumpxml_option = "--inactive"
Example #18
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils (platform.dist() is gone in Python 3.8+;
    # avocado.utils.distro is assumed to be imported as distro)
    cgutils = "libcgroup-tools"
    if "ubuntu" in distro.detect().name.lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not cpu.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip(
                    '\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #19
def run_virsh_vcpuinfo(test, params, env):
    """
    Test command: virsh vcpuinfo.

    The command can get domain vcpu information
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform virsh vcpuinfo operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    vm_ref = params.get("vcpuinfo_vm_ref")
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    def remote_case(params, vm_name):
        """
        Test remote case.
        """

        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        # Used for connecting from remote to local
        connect_uri = params.get("remote_connect_uri",
                                 "qemu+ssh://LOCAL.EXAMPLE.COM/system")
        # Verify connect_uri is useful for this test.
        if not virsh.VirshConnectBack.kosher_args(remote_ip, connect_uri):
            raise error.TestNAError("The connect_uri parameter '%s' does "
                                    "not point at fully-qualified host "
                                    "from perspective of remote support "
                                    "system at '%s'." % (connect_uri,
                                                         remote_ip))

        status = 0
        output = ""
        err = ""
        try:
            vcback = virsh.VirshConnectBack(remote_ip=remote_ip,
                                            remote_pwd=remote_pwd,
                                            uri=connect_uri,
                                            debug=True,
                                            ignore_status=True)
            cmdresult = vcback.vcpuinfo(vm_name)
            status = cmdresult.exit_status
            output = cmdresult.stdout
            vcback.close_session()
            if status != 0:
                err = output
        except error.CmdError:
            status = 1
            output = ""
            err = "remote test failed"
        # Maintain result format conformance with local test
        return status, output, err

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("vcpuinfo_extra"))

    if vm_ref == "remote":
        # Keep status_error check conditions (below) simple
        status, output, err = remote_case(params, vm_name)
    else:
        result = virsh.vcpuinfo(vm_ref)
        status = result.exit_status
        output = result.stdout.strip()
        err = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if not status:
            logging.debug(result)
            raise error.TestFail("Run successfully with wrong command!")
        # Check the error message in negative case.
        if not err:
            logging.debug(result)
            logging.debug("Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=889276 "
                          "is helpful for tracing this bug.")
            raise error.TestFail("No error message for a command error!")
    elif status_error == "no":
        if status:
            logging.debug(result)
            raise error.TestFail("Run failed with right command")
Example #20
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters":
                "\"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))
        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain
                vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" %
                       (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                                  output)
            expect_num = 2 if start_vm_after_config else 1
            if len(max_list) != expect_num:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            expect_num = vcpus_crt if start_vm_after_config else int(
                config_vcpus)
            if len(vcpu_lines) != expect_num:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                    vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

            # Restart libvirtd
            libvirtd.restart()
            if config_vcpus and not start_vm_after_config:
                check_vm_exist(test, vm_name, 'shut off')
            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            if start_vm_after_config:
                en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
                for vcpu_sn in range(len(en_vcpu_list)):
                    vcpu_id = en_vcpu_list[vcpu_sn].split(
                        "=")[1].split()[0].strip('\'')
                    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                    cg_path = cg_obj.get_cgroup_path("cpuset")
                    if cg_obj.is_cgroup_v2_enabled():
                        vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                    else:
                        vcpu_path = os.path.join(cg_path,
                                                 "../vcpu%s" % vcpu_id)
                    if not os.path.exists(vcpu_path):
                        test.fail(
                            "Failed to find the enabled vcpu{} in {}.".format(
                                vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #21
def run_virsh_vcpuinfo(test, params, env):
    """
    Test command: virsh vcpuinfo.

    The command can get domain vcpu information
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform virsh vcpuinfo operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    # run test case
    vm_ref = params.get("vcpuinfo_vm_ref")
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    def remote_case(params, vm_name):
        """
        Test remote case.
        """
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Remote test defaults not changed")
        status = 0
        output = ""
        err = ""
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s vcpuinfo %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
            if status != 0:
                err = output
        except error.CmdError:
            status = 1
            output = ""
            err = "remote test failed"
        return status, output, err


    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif  vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("vcpuinfo_extra"))

    if vm_ref == "remote":
        status, output, err = remote_case(params, vm_name)
    else:
        result = virsh.vcpuinfo(vm_ref)
        status = result.exit_status
        output = result.stdout.strip()
        err = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0 or err == "":
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
Example #22
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError as e:
            # Bug 1218149 closed as not a bug, workaround this as in bug
            # comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin to the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name, 0, cpu, debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
Example #23
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if len(node_list) < 2:
            test.cancel('Online NUMA nodes less than 2')
        node_a, node_b = min(node_list), max(node_list)
        numa_memory.update({'nodeset': '%d,%d' % (node_a, node_b)})
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError as e:
            # Bug 1218149 closed as not a bug, workaround this as in bug
            # comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin to the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name,
                                0,
                                cpu,
                                debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)