Example #1
 def set_condn(action, recover=False):
     """
     Set/reset guest state/action
     :param action: Guest state change/action
     :param recover: whether to recover the given state (default: False)
     """
     if not recover:
         if action == "pin_vcpu":
             for i in range(cur_vcpu):
                 virsh.vcpupin(vm_name, i, hmi_cpu, "--live",
                               ignore_status=False, debug=True)
                 virsh.emulatorpin(vm_name,  hmi_cpu, "live",
                                   ignore_status=False, debug=True)
         elif action == "filetrans":
             utils_test.run_file_transfer(test, params, env)
         elif action == "save":
             save_file = os.path.join(data_dir.get_tmp_dir(),
                                      vm_name + ".save")
             result = virsh.save(vm_name, save_file, ignore_status=True,
                                 debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             if os.path.exists(save_file):
                 result = virsh.restore(save_file, ignore_status=True,
                                        debug=True)
                 utils_test.libvirt.check_exit_status(result)
                 os.remove(save_file)
         elif action == "suspend":
             result = virsh.suspend(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             result = virsh.resume(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
     return
Example #2
def get_expected_vcpupin(vm_name,
                         vcpupin_conf,
                         cpu_max_id,
                         vcpupin_option=None):
    """
    Get the expected vcpupin values to be compared with the output of the
    virsh vcpupin command

    :param vm_name: the vm name
    :param vcpupin_conf: dict, the configuration for vcpupin
    :param cpu_max_id: str, the maximum host cpu id
    :param vcpupin_option: str, option for virsh vcpupin command
    :return: dict, the new values expected for vcpupin
    """
    vcpupin_new_values = {}
    for vcpu_id, pin_to_cpu_id in vcpupin_conf.items():
        id_exp = None
        if pin_to_cpu_id == 'r':
            pin_to_cpu_id = '0-%s' % cpu_max_id
        elif pin_to_cpu_id == 'x':
            pin_to_cpu_id = '%s' % cpu_max_id
        elif pin_to_cpu_id == 'x-y,^z':
            pin_to_cpu_id = '0-%s,^%s' % (cpu_max_id, cpu_max_id)
            id_exp = '0-%d' % (int(cpu_max_id) - 1)
        elif pin_to_cpu_id == 'x':
            pin_to_cpu_id = '%d' % (int(cpu_max_id) -
                                    3) if int(cpu_max_id) >= 3 else '0'
        elif pin_to_cpu_id == 'y':
            pin_to_cpu_id = '%d' % (int(cpu_max_id) -
                                    2) if int(cpu_max_id) >= 3 else '0'
        elif pin_to_cpu_id == 'z':
            pin_to_cpu_id = '%d' % (int(cpu_max_id) -
                                    1) if int(cpu_max_id) >= 3 else '0'
        elif pin_to_cpu_id == 'x,y':
            pin_to_cpu_id = '0,%s' % cpu_max_id
        elif pin_to_cpu_id == 'x-y,^z,m':
            pin_to_cpu_id = '0-%d,^%d,%s' % (int(cpu_max_id) - 1,
                                             int(cpu_max_id) - 2, cpu_max_id)
            if int(cpu_max_id) <= 3:
                id_exp = '0,%d-%s' % (int(cpu_max_id) - 1, cpu_max_id)
            else:
                id_exp = '0-%d,%d-%s' % (int(cpu_max_id) - 3,
                                         int(cpu_max_id) - 1, cpu_max_id)
        virsh.vcpupin(vm_name,
                      vcpu=vcpu_id,
                      cpu_list=pin_to_cpu_id,
                      options=vcpupin_option,
                      debug=True,
                      ignore_status=False)

        pin_to_cpu_id = id_exp if id_exp else pin_to_cpu_id
        vcpupin_new_values.update({vcpu_id: pin_to_cpu_id})

    logging.debug("The vcpupin new values are %s" % vcpupin_new_values)
    return vcpupin_new_values
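
Note (illustrative, not part of the original test): assuming an 8-CPU host, i.e. cpu_max_id == '7', the symbolic vcpupin_conf values handled above expand roughly as sketched below; the conf dict and the VM name 'vm1' are hypothetical.

# Sketch only: expansion of the symbolic values for cpu_max_id == '7'.
cpu_max_id = '7'
vcpupin_conf = {'0': 'r', '1': 'x-y,^z', '2': 'x-y,^z,m'}
# get_expected_vcpupin('vm1', vcpupin_conf, cpu_max_id) would pin:
#   vcpu 0 -> '0-7'      and expect '0-7' back from virsh vcpupin
#   vcpu 1 -> '0-7,^7'   and expect '0-6' back
#   vcpu 2 -> '0-6,^5,7' and expect '0-4,6-7' back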
Example #3
 def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
     """
     Run the vcpupin command and then check the result.
     """
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
     if cmdResult.exit_status:
         if not status_error:
             # Command fail and it is in positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command fail and it is in negative case.
             return
     else:
         if status_error:
             # Command success and it is in negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command success and it is in positive case.
             # "--config" will take effect after VM destroyed.
             if options == "--config":
                 virsh.destroy(vm_name)
                 pid = None
             # Check the result of vcpupin command.
             check_vcpupin(vm_name, vcpu, cpu_list, pid)
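
Note (illustrative): run_and_check_vcpupin relies on status_error and check_vcpupin from the enclosing test, so the call below is only a sketch of its intended use; vm_pid is a hypothetical name.

# Sketch only (status_error, check_vcpupin and vm_pid come from the
# enclosing test scope):
# run_and_check_vcpupin(vm_name, vcpu='0', cpu_list='0-1',
#                       options='--live', pid=vm_pid)
# With status_error False the pinning is applied and then verified; with
# status_error True the command is expected to fail, and an unexpected
# success raises error.TestFail.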
Example #4
 def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
     """
     Run the vcpupin command and then check the result.
     """
     if vm_ref == "name":
         vm_ref = vm.name
     elif vm_ref == "uuid":
         vm_ref = vm.get_uuid()
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
     if cmdResult.exit_status:
         if not status_error:
             # Command fail and it is in positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command fail and it is in negative case.
             return
     else:
         if status_error:
             # Command success and it is in negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command success and it is in positive case.
             # "--config" will take effect after VM destroyed.
             pid = None
             vcpu_pid = None
             if options == "--config":
                 virsh.destroy(vm.name)
             else:
                 pid = vm.get_pid()
                 logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                 vcpu_pid = vm.get_vcpus_pid()[vcpu]
             # Check the result of vcpupin command.
             check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
Example #5
 def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
     """
     Run the vcpupin command and then check the result.
     """
     if vm_ref == "name":
         vm_ref = vm.name
     elif vm_ref == "uuid":
         vm_ref = vm.get_uuid()
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
     if cmdResult.exit_status:
         if not status_error:
             # Command fail and it is positive case.
             test.fail(cmdResult)
         else:
             # Command fail and it is negative case.
             return
     else:
         if status_error:
             # Command success and it is negative case.
             test.fail(cmdResult)
         else:
             # Command success and it is positive case.
             # "--config" will take effect after VM destroyed.
             pid = None
             vcpu_pid = None
             if options == "--config":
                 virsh.destroy(vm.name)
             else:
                 pid = vm.get_pid()
                 logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                 vcpu_pid = vm.get_vcpus_pid()[vcpu]
             # Check the result of vcpupin command.
             check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
Example #6
 def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
     """
     Run the vcpupin command and then check the result.
     """
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
     if cmdResult.exit_status:
         if not status_error:
             # Command fail and it is in positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command fail and it is in negative case.
             return
     else:
         if status_error:
             # Command success and it is in negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command success and it is in positive case.
             # "--config" will take effect after VM destroyed.
             if options == "--config":
                 virsh.destroy(vm_name)
                 pid = None
             # Check the result of vcpupin command.
             check_vcpupin(vm_name, vcpu, cpu_list, pid)
Example #7
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        host_cpu_count = utils.count_cpus()
        for i in range(host_cpu_count):
            ret = virsh.vcpupin(vm_name, 0, i, debug=True, ignore_status=True)
            if ret.exit_status:
                raise error.TestFail("vcpupin failed while numad running, %s"
                                     % bug_url)
            virsh.vcpuinfo(vm_name, debug=True)
    finally:
        utils.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
Example #8
    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns the affinity list of a vcpu from the vcpupin output

        :param vm_name: VM Name
        :param vcpu: vcpu id to look up
        :return: list of affinity for the given vcpu
        """
        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = item.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))
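
Note (illustrative): the dict-building parse above can be exercised on its own with made-up 'virsh vcpupin' output; cpus_string_to_affinity_list is then expected to expand an affinity string such as '0-3' into a per-host-cpu list (flags for cpus 0-3 set, the rest cleared).

# Sketch only: the parsing step in isolation, using hypothetical output
# instead of a live domain.
sample = ("VCPU: CPU Affinity\n"
          "----------------------------------\n"
          "   0: 0-3\n"
          "   1: 4-7\n"
          "\n")
vcpus_affinity = {}
for item in sample.split('\n')[2:-2]:
    vcpus_affinity[item.split(':')[0].strip()] = item.split(':')[1].strip()
# vcpus_affinity == {'0': '0-3', '1': '4-7'}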
Example #9
def get_vcpupin_dict(vm_name, vcpu=None, options=None):
    """
    Change vcpupin command output to a dict

    :param vm_name: vm name
    :param vcpu: str, vcpu id to get vcpupin value
    :param options: option for vcpupin command
    :return: new dict
    """

    ret = virsh.vcpupin(vm_name,
                        vcpu=vcpu,
                        options=options,
                        debug=True,
                        ignore_status=False)
    return libvirt_misc.convert_to_dict(ret.stdout.strip(), r'(\d+) +(\S+)')
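
Note (illustrative): the r'(\d+) +(\S+)' pattern pairs each vcpu id with its affinity string; a minimal sketch against made-up output is shown below, and libvirt_misc.convert_to_dict() is expected to produce the same mapping.

# Sketch only: parsing hypothetical vcpupin output with the same pattern.
import re

sample = (" VCPU   CPU Affinity\n"
          "----------------------\n"
          " 0      0-7\n"
          " 1      2,3\n")
pairs = dict(re.findall(r'(\d+) +(\S+)', sample))
# pairs == {'0': '0-7', '1': '2,3'}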
Example #10
def affinity_from_vcpupin(vm):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object

    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    result = virsh.vcpupin(vm.name)
    for vcpu in results_stdout_52lts(result).strip().split('\n')[2:]:
        vcpupin_output[int(vcpu.split(":")[0])] = vcpu.split(":")[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
Example #11
def affinity_from_vcpupin(vm):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object

    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    result = virsh.vcpupin(vm.name)
    for vcpu in results_stdout_52lts(result).strip().split('\n')[2:]:
        vcpupin_output[int(vcpu.split(":")[0])] = vcpu.split(":")[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
Example #12
File: cpu.py Project: SarahYu01/avocado-vt
def affinity_from_vcpupin(vm, vcpu=None, options=None):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object
    :param vcpu: virtual cpu to query
    :param options: --live, --current or --config
    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_count() if hasattr(utils, 'total_count') else utils.total_cpus_count()
    result = virsh.vcpupin(vm.name, vcpu=vcpu, options=options, debug=True)
    for vcpu in result.stdout_text.strip().split('\n')[2:]:
        # On newer version of libvirt, there is no ':' in
        # vcpupin output anymore
        vcpupin_output[int(vcpu.split()[0].rstrip(':'))] = vcpu.split()[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = cpus_string_to_affinity_list(vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
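
Note (illustrative): the split()/rstrip(':') combination above handles both the older output format (with a ':' after the vcpu id) and the newer one (without it), as the small sketch below shows.

# Sketch only: both output styles yield the same (vcpu_id, affinity) pair.
for line in ["   0: 0-7", "   0  0-7"]:
    vcpu_id = int(line.split()[0].rstrip(':'))
    affinity = line.split()[1]
    assert (vcpu_id, affinity) == (0, '0-7')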
Example #13
def affinity_from_vcpupin(vm, vcpu=None, options=None):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object
    :param vcpu: virtual cpu to query
    :param options: --live, --current or --config
    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    result = virsh.vcpupin(vm.name, vcpu=vcpu, options=options, debug=True)
    for vcpu in results_stdout_52lts(result).strip().split('\n')[2:]:
        # On newer version of libvirt, there is no ':' in
        # vcpupin output anymore
        vcpupin_output[int(vcpu.split()[0].rstrip(':'))] = vcpu.split()[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
Example #14
def check_vcpupin(vm_name, cpu_range, config=''):
    """
    Check the output of the vcpupin command with auto placement.

    :param vm_name: name of the VM to be executed on
    :param cpu_range: range of CPUs available as a string
    :param config: config parameter as a string, empty by default
    """
    numa_info = utils_misc.NumaInfo()
    result = virsh.vcpupin(vm_name, options=config, debug=True,
                           ignore_status=False)
    range_found = False
    for node in numa_info.get_online_nodes_withcpu():
        if re.search(r'{}\s*{}'.format(node, cpu_range), result.stdout_text):
            logging.debug('Expected cpu range: {} found in stdout for '
                          'node: {}.'.format(cpu_range, node))
            range_found = True
        else:
            logging.debug('Node {} has no cpu range'.format(node))
    if not range_found:
        raise TestFail('Expected cpu range: {} not found in stdout of '
                       'vcpupin command.'.format(cpu_range))
Example #15
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to be triggered
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        dest_path = os.path.join(data_dir.get_data_dir(), "copy")

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define',
                             'undefine', 'crash', 'device-removal-failed',
                             'watchdog', 'io-error']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        #If not exists, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            #Shutdown reason distinguished from qemu_2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, new_disk, target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "blockcommit":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs)
                    snapshot_path = dom.get_blk_devices()['vda']['source']
                    virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % snapshot_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % disk_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda completed")
                    os.unlink(snapshot_path)
                elif event == "blockcopy":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    dom.undefine()
                    virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % disk_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % dest_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda completed")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name, dimm_xml.xml,
                                        flagstr="--config", **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                    expected_events_list.append("'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # watchdog acts slowly, waiting for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                    if action == 'pause':
                        expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail("Domain state should still be paused due to I/O error!")
                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
            if os.path.exists(dest_path):
                os.unlink(dest_path)
        return [(dom.name, event) for event in expected_events_list]
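
Note (illustrative): each expected-event entry keeps a %s placeholder for the domain name, and trigger_events returns (name, pattern) pairs; the caller presumably substitutes the name before matching the pattern against the virsh event output.

# Sketch only:
#   ('vm1', "'lifecycle' for %s: Started Booted")
#   is expected to match the line "'lifecycle' for vm1: Started Booted"
#   in the event stream.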
Example #16
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) Prepare the guest with the given topology, memory and devices, if any
    2) Start and login to the guest, check for cpu and memory
    3) Do different combinations of vcpupin and, in parallel, run stress
       if given
    4) Do an optional step based on config
    5) Check that guest and host are functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpu.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name, cpus_list[-1], "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name, "0", cpus_list[0], "--config",
                                   ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))

        if config_pin:
            cpustats = utils_hotplug.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error("emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error("vcputime should be positive as vcpu it is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error("Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error("Non zero cputime even with no vcpu,emualtor pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # To make sure the cpu goes offline during host_smt
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name, vcpu, hostcpu,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error("vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error("Non zero emulatortime even with emulator unpinned")
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
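
Note (illustrative, inferred from the debug messages above): get_cpustats(vm) appears to map each host cpu id to a [vcputime, emulatortime, cputime] triple, which is why the checks read index 0 for vcpu pinning and index 1 for emulator pinning.

# Sketch only (assumed layout of the cpustats values used above):
#   cpustats[hostcpu][0] > 0  -> some vcpu ran on hostcpu
#   cpustats[hostcpu][1] > 0  -> the emulator thread ran on hostcpu
#   cpustats[hostcpu][2] > 0  -> cputime was accounted to hostcpu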
Example #17
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to be triggered
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        #If not exists, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            #Shutdown reason distinguished from qemu_2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, "''", target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
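The list returned above pairs the domain name with each expected event string (where '%s' stands for the domain name). A minimal sketch of how such pairs could be matched against collected 'virsh event' output follows; the helper name and the simplifying assumption of a single '%s' placeholder per string are illustrative, not part of the original test:

import re

def check_event_output(event_output, expected_events_list):
    """Illustrative checker (assumption: each expected string carries a
    single '%s' placeholder for the domain name and may embed '.*' as a
    wildcard).  Raise if any expected event is missing from the output of
    'virsh event --loop --all'."""
    for dom_name, event_str in expected_events_list:
        expected = event_str % dom_name
        # Escape regex metacharacters but keep the intentional '.*' wildcards
        pattern = re.escape(expected).replace(re.escape(".*"), ".*")
        if not re.search(pattern, event_output):
            raise AssertionError("event not found in output: %s" % expected)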
예제 #18
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        print(dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s does not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
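To actually observe these events, a listener has to be running while trigger_events() executes. A hedged sketch using the plain virsh CLI (rather than the framework's own helpers) might look like this:

import subprocess

def collect_domain_events(dom_name, timeout=30):
    """Illustrative helper: run 'virsh event' for the given domain until the
    timeout expires and return whatever event lines were printed.  In the
    real test a background listener would be started before the events are
    triggered."""
    result = subprocess.run(
        ["virsh", "event", "--domain", dom_name, "--all", "--loop",
         "--timeout", str(timeout)],
        capture_output=True, text=True)
    return result.stdout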
예제 #19
0
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use outrange cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use outrange cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
                # compare the expected vcpu affinity with the one obtained from the running vm
                elif not utils_hotplug.check_affinity(vm, affinity):
                    test.fail("vcpu affinity check fail")
            except xcepts.LibvirtXMLError:
                pass

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recover the host cpu environment
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
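Several of the cases above build cpuset strings such as "0-{},^{}". A small illustrative parser (similar in spirit to utils_test.libvirt.cpus_parser, which the numad test below also uses) shows how that notation expands into concrete host cpu ids:

def expand_cpuset(cpuset_str):
    """Illustrative only: expand a libvirt cpuset string such as '0-7,^3'
    into the set of selected cpu ids ('-' is a range, '^' excludes a cpu)."""
    selected, excluded = set(), set()
    for part in cpuset_str.split(','):
        part = part.strip()
        negate = part.startswith('^')
        if negate:
            part = part[1:]
        if '-' in part:
            start, end = part.split('-')
            ids = set(range(int(start), int(end) + 1))
        else:
            ids = {int(part)}
        (excluded if negate else selected).update(ids)
    return selected - excluded

# For example, expand_cpuset("0-7,^3") yields {0, 1, 2, 4, 5, 6, 7}.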
예제 #20
0
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use outrange cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use outrange cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        # compare the expected vcpu affinity with the one obtained from the running vm
        if not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num - num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{
                    'vcpu': vcpu,
                    'cpuset': "2"
                }, {
                    'vcpu': vcpu,
                    'cpuset': "3"
                }]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name,
                                          debug=True,
                                          ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail(
                            "fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name,
                                                vcpu,
                                                cputune_cpuset,
                                                debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recover the host cpu environment
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num - 1, machine_cpuset_path)
        process.run(cmd, shell=True)
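The offline/online host cpu scenarios rely on the framework's cpu.online()/cpu.offline() helpers. Underneath, those operations boil down to writing the cpu's sysfs 'online' node, roughly as in this sketch (cpu0 usually cannot be offlined and may not expose the node at all):

def set_host_cpu_online(cpu_id, online=True):
    """Illustrative sysfs sketch; the tests themselves use the avocado
    cpu.online()/cpu.offline() helpers instead."""
    path = "/sys/devices/system/cpu/cpu%s/online" % cpu_id
    with open(path, "w") as node:
        node.write("1" if online else "0")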
예제 #21
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'define', 'undefine',
                        'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s does not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
예제 #22
0
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain(save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control the domain (as BZ#1088216 is not fixed, skip
        save/managedsave/migrate-related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
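    # Presumably the five slots track [maximum --config, maximum --live,
    # current --config, current --live, current --guest], matching the
    # exp_vcpu keys (max_config, max_live, cur_config, cur_live, guest_live)
    # used by the vcpu hotplug checks elsewhere in these examples.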
    expect_vcpu_num = [
        vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num,
        vcpu_current_num
    ]
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on the host for"
                                " cpu-list=%s, but the current number of"
                                " cpus on the host is %s." %
                                (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name,
                                    vcpu_plug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug);
                # wait up to 10 minutes and skip the remaining test steps if
                # the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as the"
                                         " domain did not resume in 10 mins:"
                                         " %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number will revert to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name,
                                    vcpu_unplug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug);
                # wait up to 10 minutes and skip the remaining test steps if
                # the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as the"
                                         " domain did not resume in 10 mins:"
                                         " %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number will revert to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
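The five expected values checked throughout the example above appear to correspond to the different 'virsh vcpucount' views. A hedged sketch of how they could be read back directly from virsh (helper name and mapping are assumptions):

import subprocess

def query_vcpu_counts(vm_name):
    """Illustrative: return the five vcpu counts in the same order as the
    expect_vcpu_num list is presumed to use."""
    flag_sets = [("--maximum", "--config"), ("--maximum", "--live"),
                 ("--current", "--config"), ("--current", "--live"),
                 ("--guest",)]
    counts = []
    for flags in flag_sets:
        out = subprocess.run(["virsh", "vcpucount", vm_name, *flags],
                             capture_output=True, text=True)
        counts.append(out.stdout.strip())
    return counts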
예제 #23
0
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError as e:
            # Bug 1218149 was closed as not a bug; work around it as described
            # in bug comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin against the list of online host cpus
        cpus_list = utils.cpu_online_map()
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name, 0, cpu, debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
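For reference, the numa_memory dict assembled near the top of this example maps onto libvirt's <numatune> element; a hedged illustration with invented values:

# With params such as memory_mode='strict', memory_placement='static' and
# memory_nodeset='0-1', the loop over mem_tuple above would build:
numa_memory_example = {'mode': 'strict', 'placement': 'static', 'nodeset': '0-1'}
# which vm_xml renders roughly as:
#   <numatune>
#     <memory mode='strict' placement='static' nodeset='0-1'/>
#   </numatune>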
예제 #25
0
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain(save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num,
                       vcpu_current_num, vcpu_current_num]
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case for"
                                " the cpu-list=%s. But current number of cpu"
                                " on host is %s."
                                % (pin_cpu_list, host_cpu_count))

    cpu_max = int(host_cpu_count) - 1
    if pin_cpu_list == "x":
        pin_cpu_list = str(cpu_max)
    if pin_cpu_list == "x-y":
        pin_cpu_list = "0-%s" % cpu_max
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "0,%s" % cpu_max
    elif pin_cpu_list == "x-y,^z":
        pin_cpu_list = "0-%s,^%s" % (cpu_max, cpu_max)
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
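The example above tracks the expected vcpu counts in a positional list, while the later examples use a dict. A small sketch of the index convention it relies on (the helper name is hypothetical):

# Positions in expect_vcpu_num as used above, in order:
VCPU_FIELDS = ('max_config', 'max_live', 'cur_config', 'cur_live', 'guest_live')


def as_vcpu_dict(expect_vcpu_num):
    """Label the positional expect_vcpu_num list with the dict keys used later."""
    return dict(zip(VCPU_FIELDS, expect_vcpu_num))

# e.g. as_vcpu_dict([4, 4, 2, 2, 2]) ->
# {'max_config': 4, 'max_live': 4, 'cur_config': 2, 'cur_live': 2, 'guest_live': 2}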
예제 #26
0
    def vm_stress_events(self, event, vm):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        """
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.emulatorpin(vm.name,
                                               random.choice(
                                                   self.host_cpu_list),
                                               **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.max_vcpu,
                                'guest_live': self.max_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.current_vcpu,
                                'guest_live': self.current_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for iface_num in range(int(self.iface_num)):
                    logging.debug("Try to attach interface %d" % iface_num)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (self.iface_type, self.iface_source['network'],
                                self.iface_model, mac, self.attach_option))
                    logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                    ret = virsh.attach_interface(vm.name, options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if self.detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (self.iface_type, mac, self.detach_option))
                        logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                        ret = virsh.detach_interface(vm.name, options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(self.device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                    disk.update({"format": self.disk_format,
                                 "source": device_source})
                    disk_xml = Disk(self.disk_type)
                    disk_xml.device = self.disk_device
                    disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                    ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if self.detach_option:
                        ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(self.disk_type, disk_name)
            else:
                raise NotImplementedError
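The stress helper above expects an enclosing class that provides iterations, host_cpu_list and similar attributes. A standalone sketch of just its vcpupin stress step, under the assumption that the avocado-vt imports below are available:

import random

from virttest import virsh                # assumed import path
from virttest.utils_test import libvirt   # assumed import path


def vcpupin_stress(vm, current_vcpu, host_cpu_list, iterations=10):
    """Repeatedly pin every vcpu of the domain to a random host CPU."""
    dargs = {'ignore_status': True, 'debug': True}
    for _ in range(iterations):
        for vcpu in range(int(current_vcpu)):
            result = virsh.vcpupin(vm.name, vcpu,
                                   random.choice(host_cpu_list), **dargs)
            libvirt.check_exit_status(result)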
예제 #27
0
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if len(node_list) < 2:
            test.cancel('Online NUMA nodes less than 2')
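        # Use the lowest and highest online NUMA node ids for the nodeset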
        node_a, node_b = min(node_list), max(node_list)
        numa_memory.update({'nodeset': '%d,%d' % (node_a, node_b)})
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError as e:
            # Bug 1218149 was closed as not a bug; work around it as described
            # in bug comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin to the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name,
                                0,
                                cpu,
                                debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
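For reference, the numa_memory dict built above (mode/placement/nodeset keys) ends up as the domain's <numatune> element. An illustrative sketch of the mapping (the XML is shown as comments and is not an exact libvirt dump):

# A numa_memory dict like this ...
numa_memory = {'mode': 'strict', 'placement': 'static', 'nodeset': '0,1'}

# ... is rendered under <numatune> roughly as:
#   <numatune>
#     <memory mode='strict' placement='static' nodeset='0,1'/>
#   </numatune>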
예제 #28
0
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity with the one got from running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())
        if hostcpu_num < 8:
            test.cancel("The host should have at least 8 CPUs for this test.")

        # online all host cpus
        for x in range(1, hostcpu_num):
            if cpuutil.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid a mismatch with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
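        # For negative tests (num == 0) the upper bound stays at hostcpu_num,
        # which is out of range and should trigger the expected failure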
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("fail to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recover the host cpu environment
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
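The cputune.vcpupins assignment used above maps to <vcpupin> entries under <cputune> in the domain XML. A hedged illustration using the same VMCPUTuneXML helper (the XML fragment is approximate):

from virttest.libvirt_xml import vm_xml   # assumed import path

cputune = vm_xml.VMCPUTuneXML()
cputune.vcpupins = [{'vcpu': '0', 'cpuset': '2'}]
# Approximate resulting domain XML fragment:
#   <cputune>
#     <vcpupin vcpu='0' cpuset='2'/>
#   </cputune>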
예제 #29
0
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain(save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag to inform whether to set or reset
                         vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :params vm: VM object
        :params vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exit status: %s, cmd output: %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, skip unsupported operations (vcpu plug or unplug) by
        checking the command stderr.

        :params cmd_result: Command result
        :params expect_error: Whether to expect error True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            " old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupported virsh setvcpus hotplug")

            # Maybe QEMU doesn't support vcpu unplug
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu does not support vcpu unplug")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_uptime_init = 0
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_failed = 0

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

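    # Find two consecutive online host CPU ids to build an "x-y" style cpu range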
    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        vm_uptime_init = vm.uptime()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num):
                logging.error("Expected vcpu check failed")
                result_failed += 1
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU bug),
                    # so wait up to 10 minutes and skip the remaining test steps
                    # if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

            # Unplug vcpu
            # Since QEMU 2.2.0, all current vcpus are non-hotpluggable by
            # default when the VM is started, and vcpu 0 (id=1) must always be
            # present and non-hotpluggable, which means we can't hotunplug these
            # vcpus directly. So we can either hotplug more vcpus before the
            # hotunplug, or set the 'hotpluggable' attribute to 'yes' on the
            # vcpus other than vcpu 0, to make sure libvirt can find appropriate
            # hotpluggable vcpus to reach the desired target vcpu count. As the
            # simpler preparation step, we choose to hotplug more vcpus here.
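            # (Illustrative only: the per-vcpu alternative mentioned above would
            # mark vcpus other than 0 with hotpluggable='yes' in the domain XML,
            # e.g. <vcpus><vcpu id='1' enabled='yes' hotpluggable='yes'/></vcpus>.)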
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make "
                                 "sure all these newly plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpu will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # setvcpus is asynchronous: even after it returns, the
                # operation may not be complete, so the guest vcpu number
                # needs to be polled.
                # For the case of unplugging vcpus from the maximum count down
                # to 1, keep observing after setvcpus returns until the vcpu
                # number has gradually reached 1.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time
                    # (QEMU bug), so wait up to 10 minutes and skip the
                    # remaining test steps if the domain does not resume
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1
        if vm.uptime() < vm_uptime_init:
            test.fail("Unexpected VM reboot detected during the test")
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if result_failed > 0:
            test.fail("Test Failed")
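The example above notes that setvcpus is asynchronous and polls lscpu inside the guest. A minimal sketch of the same wait pattern built on utils_misc.wait_for and utils_misc.check_if_vm_vcpu_match, both already used in that example (the helper name is hypothetical):

from virttest import utils_misc   # assumed import path


def wait_for_vcpu_count(vm, expected_count, timeout=480):
    """Poll until the guest reports the expected vcpu count, or time out."""
    return utils_misc.wait_for(
        lambda: utils_misc.check_if_vm_vcpu_match(expected_count, vm),
        timeout, text="wait for vcpu count to settle")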
예제 #30
0
    def vm_stress_events(self, event, vm, params):
        """
        Stress events

        :param event: event name
        :param vm: vm object
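        :param params: test parameters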
        """
        current_vcpu = int(params.get("smp", 2))
        max_vcpu = int(params.get("vcpu_maxcpus", 2))
        iface_num = params.get("iface_num", '1')
        iface_type = params.get("iface_type", "network")
        iface_model = params.get("iface_model", "virtio")
        iface_source = eval(params.get("iface_source",
                                       "{'network':'default'}"))
        attach_option = params.get("attach_option", "")
        detach_option = params.get("detach_option", "")
        disk_size = params.get("virt_disk_device_size", "1")
        disk_type = params.get("disk_type", "file")
        disk_device = params.get("disk_device", "disk")
        disk_format = params.get("disk_format", "qcow2")
        device_target = params.get("virt_disk_device_target", "vda").split()
        path = params.get("path", "")
        device_source_names = params.get("virt_disk_device_source", "").split()
        disk_driver = params.get("driver_name", "qemu")
        self.ignore_status = params.get("ignore_status", "no") == "yes"
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(current_vcpu):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                result = virsh.emulatorpin(vm.name,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, max_vcpu, "--live", **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {
                        'max_config': max_vcpu,
                        'max_live': max_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': max_vcpu,
                        'guest_live': max_vcpu
                    }
                    utils_hotplug.check_vcpu_value(vm,
                                                   exp_vcpu,
                                                   option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {
                        'max_config': max_vcpu,
                        'max_live': max_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': current_vcpu,
                        'guest_live': current_vcpu
                    }
                    utils_hotplug.check_vcpu_value(vm,
                                                   exp_vcpu,
                                                   option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for iface_index in range(int(iface_num)):
                    logging.debug("Trying to attach interface %d", iface_index)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    logging.debug(
                        "VM name: %s , Options for Network attach: %s",
                        vm.name, options)
                    ret = virsh.attach_interface(vm.name,
                                                 options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        logging.debug(
                            "VM name: %s , Options for Network detach: %s",
                            vm.name, options)
                        ret = virsh.detach_interface(vm.name,
                                                     options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(path, vm.name,
                                             device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        disk_type,
                        disk_name,
                        disk_size,
                        disk_format=disk_format)
                    disk.update({
                        "format": disk_format,
                        "source": device_source
                    })
                    disk_xml = Disk(disk_type)
                    disk_xml.device = disk_device
                    disk_xml.driver = {
                        "name": disk_driver,
                        "type": disk_format
                    }
                    ret = virsh.attach_disk(vm.name,
                                            disk["source"],
                                            device_target[disk_num],
                                            attach_option,
                                            debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if detach_option:
                        ret = virsh.detach_disk(vm.name,
                                                device_target[disk_num],
                                                extra=detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(disk_type, disk_name)
            else:
                raise NotImplementedError
            time.sleep(self.itr_sleep_time)
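
The vcpupin branch above pins each guest vCPU to a random online host CPU on every iteration. A minimal standalone sketch of the same idea, hedged: the domain name 'vm1', the two guest vCPUs and the host CPU list are assumptions, not values taken from the example.

import random

from virttest import virsh
from virttest.utils_test import libvirt

host_cpu_list = ['0', '1', '2', '3']    # assumed online host CPUs
for vcpu in range(2):                   # assume the guest exposes 2 vCPUs
    result = virsh.vcpupin("vm1", vcpu, random.choice(host_cpu_list),
                           ignore_status=True, debug=True)
    libvirt.check_exit_status(result)   # fail if virsh returned a non-zero status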
예제 #31
0
 def trigger_events(events_list=[]):
     """
     Trigger various events in events_list
     """
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ['start', 'restore']:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:"
                                                 " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, '0', '0', **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:"
                                             "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, '0', **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:"
                                             "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, 'a').close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, 'vdb', **virsh_dargs)
                 virsh.detach_disk(vm_name, 'vdb', **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:"
                                             " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Event may not be received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
     return expected_events_list
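
trigger_events returns message templates with a %s placeholder for the domain name. A small hedged sketch (the helper name is illustrative) of how a caller might fill them in and match them against captured virsh event output:

def check_expected_events(vm_name, expected_events_list, event_output):
    """Return the expected event strings missing from the captured output."""
    missing = []
    for template in expected_events_list:
        expected = template % vm_name    # fill in the domain name
        if expected not in event_output:
            missing.append(expected)
    return missing                       # an empty list means every event matched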
예제 #32
0
def run(test, params, env):
    """
    Test commands: virsh.vcpupin, virsh.iothreadpin, virsh.emulatorpin.

    Steps:
    - Configure the test VM
    - Check default values are correct
    - Perform virsh vcpupin, check iothreadpin and emulatorpin are not impacted
    - Perform virsh emulatorpin, check vcpupin and iothreadpin are not impacted
    - Perform virsh iothreadpin, check vcpupin and emulatorpin are not impacted
    - Recover test environment
    """

    start_vm = params.get("start_vm", "yes") == "yes"
    change_vcpupin = params.get("change_vcpupin", "no") == 'yes'
    change_emulatorpin = params.get("change_emulatorpin", "no") == 'yes'
    change_iothreadpin = params.get("change_iothreadpin", "no") == 'yes'

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    check_to_skip_case(params, test)

    if vm.is_alive():
        vm.destroy()

    original_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_copy = original_vm_xml.copy()
    prepare_vm(original_vm_xml, params)

    host_cpus = cpu.online_cpus_count()
    cpu_max_index = int(host_cpus) - 1

    try:

        if start_vm:
            vm.start()
        logging.debug("After vm starts, vm xml is:"
                      "%s\n", vm_xml.VMXML.new_from_dumpxml(vm_name))
        logging.debug("Get default vcpupin/emulatorpin/iothreadpin values of the vm")
        vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
        logging.debug("Check and compare default vcpupin/emulatorpin/iothreadpin values")
        compare_results(vcpupin_result, emulatorpin_result,
                        iothreadpin_result, params.get("iothreadid", '1'), test)
        if change_vcpupin:
            # Change vcpupin, then check vcpupin, and emulatorpin/iothreadpin
            # should not be affected.
            logging.debug("Now change vcpupin value to the guest")
            cpu_list = "0-%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            virsh.vcpupin(vm_name, "0", cpu_list, None, debug=True, ignore_status=False)
            changed_vcpupin = {'0': cpu_list}
            check_vcpupin(vcpupin_result, changed_vcpupin, vm_name, test)
            check_emulatorpin(emulatorpin_result, None, vm_name, test)
            check_iothreadpin(iothreadpin_result, None, vm_name, test)

        if change_emulatorpin:
            # Change emulatorpin, then check emulatorpin, and vcpupin/iothreadpin
            # should not be affected
            logging.debug("Now change emulatorpin value to the guest")
            vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
            cpu_list = "0,%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            virsh.emulatorpin(vm_name, cpu_list, ignore_status=False, debug=True)
            changed_emulatorpin = {'*': cpu_list}
            check_emulatorpin(emulatorpin_result, changed_emulatorpin, vm_name, test)
            check_vcpupin(vcpupin_result, None, vm_name, test)
            check_iothreadpin(iothreadpin_result, None, vm_name, test)

        if change_iothreadpin:
            # Change iothreadpin, then check iothreadpin, and vcpupin/emulatorpin
            # should not be affected
            logging.debug("Now change iothreadpin value to the guest")
            vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
            cpu_list = "%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            iothread_id = params.get("iothread_id", "1")
            virsh.iothreadpin(vm_name, iothread_id, cpu_list, ignore_status=False, debug=True)
            changed_iothreadpin = {iothread_id: cpu_list}
            check_iothreadpin(iothreadpin_result, changed_iothreadpin, vm_name, test)
            check_vcpupin(vcpupin_result, None, vm_name, test)
            check_emulatorpin(emulatorpin_result, None, vm_name, test)
    finally:
        vm_xml_copy.sync()
        if vm.is_alive():
            vm.destroy()
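
As a quick illustration of the cpu_list strings built in this example, assuming an 8-CPU host so that cpu_max_index is 7:

# Assumed 8 online host CPUs; mirrors the expressions used above.
cpu_max_index = 8 - 1                              # 7
vcpupin_list = "0-%s" % (cpu_max_index - 1)        # "0-6": pin vcpu 0 to CPUs 0-6
emulatorpin_list = "0,%s" % (cpu_max_index - 1)    # "0,6": pin the emulator to CPUs 0 and 6
iothreadpin_list = "%s" % (cpu_max_index - 1)      # "6":   pin iothread 1 to CPU 6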
예제 #33
0
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) Prepare the guest with the given topology, memory and devices, if any
    2) Start and log in to the guest, check cpu and memory
    3) Do different combinations of vcpupin and, if given, run stress
       in parallel
    4) Do an optional step based on the config
    5) Check that guest and host are functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocado_test":
                testlist = utils_test.get_avocadotestlist(params)
                bt = utils_test.run_avocado_bg(vm, params, test, testlist)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms",
                                       params=params,
                                       vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        max_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': max_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': max_vcpu,
                    'guest_live': max_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel(
                        "Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocado_test":
                guestbt.join()
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms",
                                         params=params,
                                         vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        current_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': current_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': current_vcpu,
                    'guest_live': current_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(
                        os.path.join(root_cpuset_path, "machine.slice")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path,
                                                "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpuutil.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name,
                                       cpus_list[-1],
                                       "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name,
                                   "0",
                                   cpus_list[0],
                                   "--config",
                                   ignore_status=True,
                                   debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))

        if config_pin:
            cpustats = cpu.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error(
                    "emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error(
                    "vcputime should be positive as vcpu it is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error(
                    "Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error(
                        "Non zero cputime even with no vcpu,emualtor pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # To make sure the cpu can go offline during host_smt
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug(
                    "hostcpu:%s vcputime: %s emulatortime: "
                    "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                    cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name,
                                       vcpu,
                                       hostcpu,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug(
                    "hostcpu:%s vcputime: %s emulatortime: "
                    "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                    cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error(
                            "vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error(
                            "Non zero emulatortime even with emulator unpinned"
                        )
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
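
The cpustats checks in this example index each per-CPU entry as [vcputime, emulatortime, cputime], as the debug logging shows. A hedged, purely illustrative snapshot of what a passing config_pin run would look like:

# Hypothetical cpustats keyed by host CPU id (times in nanoseconds).
cpustats = {
    '0': [1500000000, 0, 1500000000],   # vcpu 0 pinned here: vcputime > 0, no emulator time
    '7': [0, 800000000, 800000000],     # emulator pinned here: emulatortime > 0, no vcpu time
    '3': [0, 0, 0],                     # nothing pinned here: all counters stay at zero
}
assert cpustats['0'][0] > 0 and cpustats['0'][1] == 0
assert cpustats['7'][1] > 0 and cpustats['7'][0] == 0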
예제 #34
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        print(dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    raise error.TestError("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
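
Unlike example #31, this variant returns (domain name, message template) tuples so one caller can verify events for several domains. Rendering them into the final expected strings is then a one-liner (values below are purely illustrative):

expected = [("vm1", "'lifecycle' for %s: Started Booted"),
            ("vm1", "'lifecycle' for %s: Stopped Destroyed")]
expected_strings = [template % name for name, template in expected]
# -> ["'lifecycle' for vm1: Started Booted", "'lifecycle' for vm1: Stopped Destroyed"]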
예제 #35
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in ['start', 'restore', 'create', 'define', 'undefine']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after guest request")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Stopped Shutdown")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after host request")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Stopped Shutdown")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, "''", target_device,
                                 "--type cdrom --sourcetype file --config")
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, ignore_status=True, debug=True)
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, ignore_status=True, debug=True)
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain(save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             like save, managedsave, suspend
        :param recover: flag to inform whether to set or reset
                        vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :param vm: VM object
        :param vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, pass unsupported commands (plug or unplug vcpus) by
        checking the command stderr.

        :param cmd_result: Command result
        :param expect_error: Whether an error is expected, True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpus may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpus may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support vcpu unplug
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu does not support vcpu unplug")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_vcpu = True

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value taken from the cfg
        pass
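    # Example mapping, assuming cpus_list == ['0', '1', '2', '3'] so that
    # cpu_seq_str becomes '0-1':
    #   "x"      -> '3'      (last online cpu)
    #   "x-y"    -> '0-1'
    #   "x,y"    -> '0,1'
    #   "x-y,^z" -> '0-1,^1'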

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num)
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num_bk,
                                                                 {},
                                                                 setvcpu_option)

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 status may take a long time (QEMU
                    # bug), so wait up to 10 mins and skip the remaining part of
                    # the tests if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers
                    # to the original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

            # Unplug vcpu
            # Since QEMU 2.2.0, all current vcpus are non-hotpluggable by
            # default when the VM is started, and vcpu 0 (id=1) is required
            # to always be present and non-hotpluggable, which means we
            # can't hotunplug these vcpus directly. So we can either hotplug
            # more vcpus before the hotunplug, or set the 'hotpluggable'
            # attribute to 'yes' for every vcpu except vcpu 0, so that
            # libvirt can find enough hotpluggable vcpus to reach the
            # desired target vcpu count. As a simple preparation step, we
            # choose to hotplug more vcpus here.
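            # A hedged sketch of the alternative approach (not used by this
            # test): mark the non-boot vcpus as hotpluggable in the domain
            # XML so libvirt can unplug them directly, e.g. (illustrative
            # XML snippet):
            #     <vcpus>
            #       <vcpu id='0' enabled='yes' hotpluggable='no' order='1'/>
            #       <vcpu id='1' enabled='yes' hotpluggable='yes' order='2'/>
            #     </vcpus>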
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make "
                                 "sure all these newly plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpus will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # The setvcpus operation is asynchronous: its return does not
                # mean the operation is complete, so the guest vcpu count
                # needs to be polled. For the case of unplugging vcpus from
                # the maximum vcpu number down to 1, keep observing after
                # setvcpus returns until the count reported inside the guest
                # stops changing.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported on this host:"
                                "\n%s" % result.stderr)
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as the cpu count is not "
                                  "reflected inside the guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 may take a long time (QEMU
                    # bug), so wait up to 10 minutes and skip the remaining
                    # test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers
                    # to the original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if not result_vcpu:
            test.fail("Test Failed")
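
A minimal sketch (a hypothetical helper, not part of the example above) of how
the expect_vcpu_num dictionary keys used in this and the later examples map
onto the rows reported by `virsh vcpucount <domain>` and the cpu count seen
inside the guest:

def sketch_expected_vcpucount(expect_vcpu_num):
    """Pair each expect_vcpu_num key with the value it is checked against."""
    return [
        ("maximum", "config", expect_vcpu_num['max_config']),
        ("maximum", "live", expect_vcpu_num['max_live']),
        ("current", "config", expect_vcpu_num['cur_config']),
        ("current", "live", expect_vcpu_num['cur_live']),
        # 'guest_live' is the online cpu count observed from inside the
        # guest (e.g. via lscpu or the guest agent), not a vcpucount row.
        ("guest", "live", expect_vcpu_num['guest_live']),
    ]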
Example #37
0
    vmxml = VMXML()
    vmxml['xml'] = virsh.dumpxml(vm.name, dumpxml_option).stdout.strip()
    try:
        if vmxml['vcpu'] != int(expect_vcpu_num[0]):
            raise error.TestFail("Max vcpu number %s in domain XML is not"
                                 " expected" % vmxml['vcpu'])
        if vmxml['current_vcpu'] != expect_vcpu_num[i]:
            raise error.TestFail("Current vcpu number %s in domain XML is"
                                 " not expected" % vmxml['current_vcpu'])
    except (ValueError, IndexError), detail:
        raise error.TestFail(detail)
    logging.debug("Vcpu number in domain xml check pass")

    # Check the cpu affinity obtained from the vcpupin command output and the
    # vcpupin info (cputune element) in the domain xml
    result = virsh.vcpupin(vm.name, ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    vcpupin_output = result.stdout.strip().splitlines()[2:]
    if expect_vcpupin:
        host_cpu_count = utils.count_cpus()
        xml_affinity_list = []
        xml_affinity = {}
        try:
            xml_affinity_list = vmxml['cputune'].vcpupins
        except LibvirtXMLNotFoundError:
            logging.debug("No <cputune> element found in domain xml")
        # Store xml_affinity_list to a dict
        for vcpu in xml_affinity_list:
            xml_affinity[vcpu['vcpu']] = "".join(
                libvirt.cpus_string_to_affinity_list(vcpu['cpuset'],
                                                     host_cpu_count))
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain (save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain (restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num,
                       vcpu_current_num, vcpu_current_num]
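    # Index meanings, inferred from how the list is used below:
    # [0] max config, [1] max live, [2] current config, [3] current live,
    # [4] vcpu count seen from inside the guest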
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("This case needs more cpus on the host for"
                                " cpu-list=%s, but the current number of cpus"
                                " on the host is %s."
                                % (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
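    # Find the first pair of consecutive online cpu ids to build an "x-y"
    # style range, e.g. cpus_list ['0', '1', '3'] yields cpu_seq_str "0-1"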
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value obtained from the cfg
        pass
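    # Worked example, assuming cpus_list == ['0', '1', '2', '3']:
    #   "x"      -> "3"      (last online cpu)
    #   "x-y"    -> "0-1"    (first consecutive pair)
    #   "x,y"    -> "0,1"
    #   "x-y,^z" -> "0-1,^1"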

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))

        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Failed to enable newly added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU
                # bug), so wait up to 10 minutes and skip the remaining
                # test steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        # Since QEMU 2.2.0, all current vcpus are non-hotpluggable by
        # default when the VM is started, and vcpu 0 (id=1) is required
        # to always be present and non-hotpluggable, which means we
        # can't hotunplug these vcpus directly. So we can either hotplug
        # more vcpus before the hotunplug, or set the 'hotpluggable'
        # attribute to 'yes' for every vcpu except vcpu 0, so that
        # libvirt can find enough hotpluggable vcpus to reach the
        # desired target vcpu count. As a simple preparation step, we
        # choose to hotplug more vcpus here.
        if vcpu_unplug:
            if setvcpu_option == "--live":
                logging.info("Hotplug vcpu to the maximum count to make sure"
                             " all these newly plugged vcpus are hotunpluggable")
                result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                        debug=True)
                libvirt.check_exit_status(result)
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # As the vcpus will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU
                # bug), so wait up to 10 minutes and skip the remaining
                # test steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #39
0
 def trigger_events(events_list=[]):
     """
     Trigger various events in events_list
     """
     expected_events_list = []
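     # The "%s" placeholders below are assumed to be filled in later by the
     # caller (e.g. formatted with the domain name) before being matched
     # against the `virsh event` output.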
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ["start", "restore"]:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s does not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:" " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, "a").close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                 virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:" " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Events may not be received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
         return expected_events_list
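
A hedged usage sketch (not part of the original example; vm_name and the
event-matching step are assumed from the surrounding test):

expected = trigger_events(["start", "save", "restore"])
# Fill in the domain name before matching against `virsh event --loop` output.
patterns = [item % vm_name for item in expected]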
Example #40
0
 def vm_stress_events(self, event, vm):
     """
     Stress events
     :param event: event name
     :param vm: vm object
     """
     dargs = {'ignore_status': True, 'debug': True}
     for itr in range(self.iterations):
         if "vcpupin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.vcpupin(vm.name, vcpu,
                                        random.choice(self.host_cpu_list),
                                        **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "emulatorpin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.emulatorpin(
                     vm.name, random.choice(self.host_cpu_list), **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "suspend" in event:
             result = virsh.suspend(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
             time.sleep(self.event_sleep_time)
             result = virsh.resume(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
         elif "cpuhotplug" in event:
             result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {
                     'max_config': self.max_vcpu,
                     'max_live': self.max_vcpu,
                     'cur_config': self.current_vcpu,
                     'cur_live': self.max_vcpu,
                     'guest_live': self.max_vcpu
                 }
                 utils_hotplug.check_vcpu_value(vm,
                                                exp_vcpu,
                                                option="--live")
             time.sleep(self.event_sleep_time)
             result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {
                     'max_config': self.max_vcpu,
                     'max_live': self.max_vcpu,
                     'cur_config': self.current_vcpu,
                     'cur_live': self.current_vcpu,
                     'guest_live': self.current_vcpu
                 }
                 utils_hotplug.check_vcpu_value(vm,
                                                exp_vcpu,
                                                option="--live")
         elif "reboot" in event:
             vm.reboot()
         else:
             raise NotImplementedError
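
A hedged usage sketch (not part of the original example): 'stress' stands for
a hypothetical instance of the stress class this method belongs to.

for event in ["vcpupin", "emulatorpin", "suspend", "cpuhotplug", "reboot"]:
    stress.vm_stress_events(event, vm)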
Example #42
0
def test_change_vcpupin_emulatorpin_iothreadpin(test, guest_xml, cpu_max_index, vm, params):
    """
    - Check default values are correct
    - Perform virsh vcpupin, check iothreadpin and emulatorpin are not impacted
    - Perform virsh emulatorpin, check vcpupin and iothreadpin are not impacted
    - Perform virsh iothreadpin, check vcpupin and emulatorpin are not impacted

    :param test: test object
    :param guest_xml: vm xml
    :param cpu_max_index: int, cpu maximum index on host
    :param vm: vm instance
    :param params: test dict
    :return: None
    """
    def _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                      changed_vcpupin, changed_emulatorpin, changed_iothreadpin):
        """
        Internal common function to check the command result

        :param vcpupin_result: dict, the vcpupin command result
        :param emulatorpin_result: dict, the emulatorpin command result
        :param iothreadpin_result: dict, the iothreadpin command result
        :param changed_vcpupin: dict, the changed value for vcpupin
        :param changed_emulatorpin: dict, the changed value for emulatorpin
        :param changed_iothreadpin: dict, the changed value for iothreadpin
        :return: None
        """
        check_vcpupin(vcpupin_result, changed_vcpupin, vm.name, test)
        check_emulatorpin(emulatorpin_result, changed_emulatorpin, vm.name, test)
        check_iothreadpin(iothreadpin_result, changed_iothreadpin, vm.name, test)

    prepare_vm(guest_xml, params)
    vm.start()
    logging.debug("After vm starts, vm xml is:"
                  "%s\n", vm_xml.VMXML.new_from_dumpxml(vm.name))

    logging.debug("Get default vcpupin/emulatorpin/iothreadpin values of the vm")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    logging.debug("Check and compare default vcpupin/emulatorpin/iothreadpin values")
    compare_results(vcpupin_result, emulatorpin_result,
                    iothreadpin_result, params.get("iothread_id"), test)

    # Change vcpupin, then check vcpupin; emulatorpin and iothreadpin
    # should not be affected.
    logging.debug("Now change vcpupin value to the guest")
    cpu_list = "0-%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    virsh.vcpupin(vm.name, "0", cpu_list, None, debug=True, ignore_status=False)
    changed_vcpupin = {'0': cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  changed_vcpupin, None, None)

    # Change emulatorpin, then check emulatorpin; vcpupin and iothreadpin
    # should not be affected
    logging.debug("Now change emulatorpin value to the guest")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    cpu_list = "0,%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    virsh.emulatorpin(vm.name, cpu_list, ignore_status=False, debug=True)
    changed_emulatorpin = {'*': cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  None, changed_emulatorpin, None)

    # Change iothreadpin, then check iothreadpin; vcpupin and emulatorpin
    # should not be affected
    logging.debug("Now change iothreadpin value to the guest")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    cpu_list = "%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    iothread_id = params.get("iothread_id")
    virsh.iothreadpin(vm.name, iothread_id, cpu_list, ignore_status=False, debug=True)
    changed_iothreadpin = {iothread_id: cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  None, None, changed_iothreadpin)
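
For quick reference, a hedged recap (domain name and cpu ids are illustrative;
the virsh wrapper module is assumed to be imported as in the snippets above)
of the three pinning calls this test exercises:

virsh.vcpupin("guest1", "0", "0-2", None, ignore_status=False, debug=True)   # pin vcpu 0
virsh.emulatorpin("guest1", "0,3", ignore_status=False, debug=True)          # pin the emulator thread
virsh.iothreadpin("guest1", "1", "3", ignore_status=False, debug=True)       # pin iothread id 1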