Example #1
def set_condn(action, recover=False):
    """
    Set/reset guest state/action

    :param action: guest state change/action to perform
    :param recover: whether to recover the given state (default: False)
    """
    if not recover:
        if action == "pin_vcpu":
            for i in range(cur_vcpu):
                virsh.vcpupin(vm_name, i, hmi_cpu, "--live",
                              ignore_status=False, debug=True)
                virsh.emulatorpin(vm_name, hmi_cpu, "live",
                                  ignore_status=False, debug=True)
        elif action == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif action == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            time.sleep(10)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif action == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            time.sleep(10)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
    return
def get_emulatorpin_parameter(params, test):
    """
    Get the emulatorpin parameters
    :param params: the parameter dictionary
    :param test: the test object
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, options=options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    if status_error == "yes":
        if status or not check_emulatorpin(params, test):
            logging.info("This is an expected failure: %s", result.stderr)
        else:
            test.fail("%d is not an expected command return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            logging.info(result.stdout)
def get_emulatorpin_parameter(params):
    """
    Get the emulatorpin parameters
    :param params: the parameter dictionary
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, options=options)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    if status_error == "yes":
        if status or not check_emulatorpin(params):
            logging.info("It's an expected : %s", result.stderr)
        else:
            raise error.TestFail("%d not a expected command "
                                 "return value", status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            logging.info(result.stdout)
def check_emulatorpin(params):
    """
    Check emulator affinity
    :param params: the parameter dictionary
    """
    dicts = {}
    vm = params.get("vm")
    vm_name = params.get("main_vm")
    cpu_list = params.get("cpu_list")
    cgconfig = params.get("cgconfig", "on")
    options = params.get("emulatorpin_options")

    result = virsh.emulatorpin(vm_name)
    cmd_output = result.stdout.strip().splitlines()
    logging.debug(cmd_output)
    # Parse the command output into a dictionary; the first two lines
    # are the table header.
    for line in cmd_output[2:]:
        k, v = line.split(':')
        dicts[k.strip()] = v.strip()

    logging.debug(dicts)

    emulator_from_cmd = dicts['*']
    emulatorpin_from_xml = ""

    # Changing a running guest with the 'config' option only takes effect
    # on the next boot, so without shutting the guest down, virsh dumpxml
    # needs the 'inactive' option to show those XML changes.
    if options == "config" and vm and not vm.is_alive():
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name, "--inactive").cputune.emulatorpin
    else:
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name).cputune.emulatorpin

    # To get guest corresponding emulator/cpuset.cpus value
    # from cpuset controller of the cgroup.
    if cgconfig == "on" and vm and vm.is_alive():
        emulatorpin_from_cgroup = get_emulatorpin_from_cgroup(params)
        logging.debug("The emulatorpin value from "
                      "cgroup: %s", emulatorpin_from_cgroup)

    # To check specified cpulist value with virsh command output
    # and/or cpuset.cpus from cpuset controller of the cgroup.
    if cpu_list:
        if vm and vm.is_alive() and options != "config":
            if (cpu_list != cpus_parser(emulator_from_cmd)) or \
                    (cpu_list != cpus_parser(emulatorpin_from_cgroup)):
                logging.error("To expect emulatorpin %s: %s",
                              cpu_list, emulator_from_cmd)
                return False
        else:
            if cpu_list != cpus_parser(emulatorpin_from_xml):
                logging.error("To expect emulatorpin %s: %s",
                              cpu_list, emulatorpin_from_xml)
                return False

    return True
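
The parsing step above is plain string handling. A minimal, self-contained sketch of it, assuming the usual two header lines that virsh prints before the affinity table (the sample text is illustrative, not captured output):

sample = ("emulator: CPU Affinity\n"
          "----------------------------------\n"
          "       *: 0-3\n")

def parse_emulatorpin(output):
    # Skip the two header lines, then split each "key: value" row.
    pins = {}
    for line in output.strip().splitlines()[2:]:
        key, value = line.split(':')
        pins[key.strip()] = value.strip()
    return pins

print(parse_emulatorpin(sample))  # {'*': '0-3'}
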
def check_emulatorpin(params, test):
    """
    Check emulator affinity
    :param params: the parameter dictionary
    :param test: the test object
    """
    dicts = {}
    vm = params.get("vm")
    vm_name = params.get("main_vm")
    cpu_list = params.get("cpu_list")
    cgconfig = params.get("cgconfig", "on")
    options = params.get("emulatorpin_options")

    result = virsh.emulatorpin(vm_name, debug=True)
    cmd_output = result.stdout.strip().splitlines()
    logging.debug(cmd_output)
    # Parse the command output into a dictionary; the first two lines
    # are the table header.
    for line in cmd_output[2:]:
        k, v = line.split(':')
        dicts[k.strip()] = v.strip()

    logging.debug(dicts)

    emulator_from_cmd = dicts['*']
    emulatorpin_from_xml = ""

    # Changing a running guest with the 'config' option only takes effect
    # on the next boot, so without shutting the guest down, virsh dumpxml
    # needs the 'inactive' option to show those XML changes.
    if options == "config" and vm and not vm.is_alive():
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name, "--inactive").cputune.emulatorpin
    else:
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name).cputune.emulatorpin

    # To get guest corresponding emulator/cpuset.cpus value
    # from cpuset controller of the cgroup.
    if cgconfig == "on" and vm and vm.is_alive():
        emulatorpin_from_cgroup = get_emulatorpin_from_cgroup(params, test)
        logging.debug("The emulatorpin value from "
                      "cgroup: %s", emulatorpin_from_cgroup)

    # To check specified cpulist value with virsh command output
    # and/or cpuset.cpus from cpuset controller of the cgroup.
    if cpu_list:
        if vm and vm.is_alive() and options != "config":
            if (cpu_list != cpus_parser(emulator_from_cmd)) or \
                    (cpu_list != cpus_parser(emulatorpin_from_cgroup)):
                logging.error("To expect emulatorpin %s: %s", cpu_list,
                              emulator_from_cmd)
                return False
        else:
            if cpu_list != cpus_parser(emulatorpin_from_xml):
                logging.error("To expect emulatorpin %s: %s", cpu_list,
                              emulatorpin_from_xml)
                return False

    return True
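
check_emulatorpin() leans on cpus_parser() from the test framework to normalize cpulist strings before comparing them. A rough, self-contained stand-in for what it does with plain "N", "N-M" and "^N" tokens (an assumption about its behavior; the real parser accepts more input forms):

def cpus_parser_sketch(cpulist):
    cpus, excluded = set(), set()
    for token in str(cpulist).split(','):
        token = token.strip()
        target = excluded if token.startswith('^') else cpus
        token = token.lstrip('^')
        if '-' in token:
            start, end = token.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(token))
    return sorted(cpus - excluded)

print(cpus_parser_sketch("0-3,^2,8"))  # [0, 1, 3, 8]
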
def check_emulatorpin(vm_name, cpu_range, config=''):
    """
    Check the output of the emulatorpin command with auto placement.

    :param vm_name: name of the VM to be executed on
    :param cpu_range: range of CPUs available as a string
    :param config: config parameter as a string, empty by default
    """
    result = virsh.emulatorpin(vm_name, options=config, debug=True,
                               ignore_status=False)
    if re.search(r'\*:\s*{}'.format(cpu_range), result.stdout_text):
        logging.debug('Expected cpu range: {} found in stdout for '
                      'emulatorpin.'.format(cpu_range))
    else:
        raise TestFail('Expected cpu range: {} not found in stdout of '
                       'emulatorpin command.'.format(cpu_range))
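
With the raw-string fix above, the whole check hinges on one regular expression. Standalone, against assumed sample output:

import re

sample = ("emulator: CPU Affinity\n"
          "----------------------------------\n"
          "       *: 0-7\n")
cpu_range = "0-7"
# The escaped '*' matches the wildcard row; \s* tolerates column padding.
assert re.search(r'\*:\s*{}'.format(cpu_range), sample)
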
Example #7
def set_emulatorpin_parameter(params):
    """
    Set the emulatorpin parameters
    :param params: the parameter dictionary
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    cpulist = params.get("emulatorpin_cpulist")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, cpulist, options)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error")

    # Changing affinity for emulator thread dynamically is
    # not allowed when CPU placement is 'auto'
    placement = vm_xml.VMXML().new_from_dumpxml(vm_name).placement
    if placement == "auto":
        status_error = "yes"

    if status_error == "yes":
        if status or not check_emulatorpin(params):
            logging.info("It's an expected : %s", result.stderr)
        else:
            raise error.TestFail("%d not a expected command "
                                 "return value", status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            if check_emulatorpin(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'cpulist' is inconsistent with"
                                     " 'cpulist' emulatorpin XML or/and is"
                                     " different from emulator/cpuset.cpus"
                                     " value from cgroup cpuset controller")
Example #8
def set_emulatorpin_parameter(params, test):
    """
    Set the emulatorpin parameters

    :param params: the parameter dictionary
    :param test: the test object
    :raises: test.fail if command fails
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    cpulist = params.get("emulatorpin_cpulist")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, cpulist, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error")
    err_msg = params.get("err_msg")

    if status_error == "yes":
        if err_msg:
            libvirt.check_result(result, expected_fails=[err_msg])
        elif status or not check_emulatorpin(params, test):
            logging.info("It's an expected : %s", result.stderr)
        else:
            test.fail("%d not a expected command " "return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            if check_emulatorpin(params, test):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'cpulist' is inconsistent with"
                          " 'cpulist' emulatorpin XML or/and is"
                          " different from emulator/cpuset.cpus"
                          " value from cgroup cpuset controller")
Example #9
def set_emulatorpin_parameter(params):
    """
    Set the emulatorpin parameters
    :param params: the parameter dictionary
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    cpulist = params.get("emulatorpin_cpulist")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, cpulist, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error")

    if status_error == "yes":
        if status or not check_emulatorpin(params):
            logging.info("It's an expected : %s", result.stderr)
        else:
            raise error.TestFail("%d not a expected command "
                                 "return value" % status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            if check_emulatorpin(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'cpulist' is inconsistent with"
                                     " 'cpulist' emulatorpin XML or/and is"
                                     " different from emulator/cpuset.cpus"
                                     " value from cgroup cpuset controller")
Example #10
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that the virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'define', 'undefine',
                        'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for the shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reasons are distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
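
A subtlety in the tray-change strings above: '%' binds tighter than '+', so only the middle fragment is formatted immediately, and the leading '%s' survives for the domain name that the returned (name, template) pairs carry. Standalone:

device_target_bus = "ide"  # illustrative value
msg = ("'tray-change' for %s disk" +
       " .*%s.*:" % device_target_bus + " opened")
print(msg)          # 'tray-change' for %s disk .*ide.*: opened
print(msg % "vm1")  # 'tray-change' for vm1 disk .*ide.*: opened
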
Example #11
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that the virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in ['start', 'restore', 'create', 'define', 'undefine']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for the shutdown to finish
                        time.sleep(3)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after guest request")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Stopped Shutdown")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after host request")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Stopped Shutdown")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, "''", target_device,
                                 "--type cdrom --sourcetype file --config")
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, ignore_status=True, debug=True)
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, ignore_status=True, debug=True)
                    expected_events_list.append("'tray-change' for %s disk ide0-1-0:"
                                                " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
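
A side note on the signature: events_list=[] creates one shared default list at definition time, a classic Python pitfall. It is harmless here because the function never mutates the default, but the conventional spelling sidesteps the trap entirely; a minimal sketch:

def trigger_events_sketch(dom, events_list=None):
    # Create a fresh list per call instead of sharing one default instance.
    events_list = [] if events_list is None else events_list
    return events_list

assert trigger_events_sketch("vm1") is not trigger_events_sketch("vm2")
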
Example #12
def vm_stress_events(self, event, vm):
    """
    Stress events

    :param event: event name
    :param vm: vm object
    """
    dargs = {'ignore_status': True, 'debug': True}
    for itr in range(self.iterations):
        if "vcpupin" in event:
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.vcpupin(vm.name, vcpu,
                                       random.choice(self.host_cpu_list),
                                       **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "emulatorpin" in event:
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.emulatorpin(vm.name,
                                           random.choice(
                                               self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "suspend" in event:
            result = virsh.suspend(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
            time.sleep(self.event_sleep_time)
            result = virsh.resume(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
        elif "cpuhotplug" in event:
            result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': self.max_vcpu,
                            'max_live': self.max_vcpu,
                            'cur_config': self.current_vcpu,
                            'cur_live': self.max_vcpu,
                            'guest_live': self.max_vcpu}
                utils_hotplug.check_vcpu_value(
                    vm, exp_vcpu, option="--live")
            time.sleep(self.event_sleep_time)
            result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': self.max_vcpu,
                            'max_live': self.max_vcpu,
                            'cur_config': self.current_vcpu,
                            'cur_live': self.current_vcpu,
                            'guest_live': self.current_vcpu}
                utils_hotplug.check_vcpu_value(
                    vm, exp_vcpu, option="--live")
        elif "reboot" in event:
            vm.reboot()
        elif "nethotplug" in event:
            for iface_num in range(int(self.iface_num)):
                logging.debug("Try to attach interface %d", iface_num)
                mac = utils_net.generate_mac_address_simple()
                options = ("%s %s --model %s --mac %s %s" %
                           (self.iface_type, self.iface_source['network'],
                            self.iface_model, mac, self.attach_option))
                logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                ret = virsh.attach_interface(vm.name, options,
                                             ignore_status=True)
                time.sleep(self.event_sleep_time)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret)
                if self.detach_option:
                    options = ("--type %s --mac %s %s" %
                               (self.iface_type, mac, self.detach_option))
                    logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                    ret = virsh.detach_interface(vm.name, options,
                                                 ignore_status=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
        elif "diskhotplug" in event:
            for disk_num in range(len(self.device_source_names)):
                disk = {}
                disk_attach_error = False
                disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                device_source = libvirt.create_local_disk(
                    self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                disk.update({"format": self.disk_format,
                             "source": device_source})
                disk_xml = Disk(self.disk_type)
                disk_xml.device = self.disk_device
                disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret, disk_attach_error)
                if self.detach_option:
                    ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    libvirt.delete_local_disk(self.disk_type, disk_name)
        else:
            raise NotImplementedError
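
The vcpupin/emulatorpin branches above boil down to repeatedly picking a random online host cpu per vcpu. A dependency-free rendering of that inner loop (the values stand in for the instance attributes):

import random

host_cpu_list = [0, 1, 2, 3]  # assumed online host cpus
current_vcpu = 2              # assumed current guest vcpu count
iterations = 3                # stands in for self.iterations
for itr in range(iterations):
    for vcpu in range(current_vcpu):
        target = random.choice(host_cpu_list)
        print("iteration %d: pin vcpu %d to host cpu %d" % (itr, vcpu, target))
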
Example #13
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) Prepare the guest with the given topology, memory and devices, if any
    2) Start and log in to the guest; check cpu and memory
    3) Run different combinations of vcpupin, with stress running in
       parallel if given
    4) Do an optional step based on the config
    5) Check that the guest and host are functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set the domain to the given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocado_test":
                testlist = utils_test.get_avocadotestlist(params)
                bt = utils_test.run_avocado_bg(vm, params, test, testlist)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms",
                                       params=params,
                                       vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        max_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': max_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': max_vcpu,
                    'guest_live': max_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel(
                        "Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocado_test":
                guestbt.join()
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms",
                                         params=params,
                                         vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        current_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': current_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': current_vcpu,
                    'guest_live': current_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu
                # hot(un)plug sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(
                        os.path.join(root_cpuset_path, "machine.slice")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path,
                                                "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpuutil.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name,
                                       cpus_list[-1],
                                       "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name,
                                   "0",
                                   cpus_list[0],
                                   "--config",
                                   ignore_status=True,
                                   debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))

        if config_pin:
            cpustats = cpu.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error(
                    "emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error(
                    "vcputime should be positive as the vcpu is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error(
                    "Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error(
                        "Non zero cputime even with no vcpu/emulator pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # Use the last cpu, which may be taken offline during host_smt
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug(
                    "hostcpu:%s vcputime: %s emulatortime: "
                    "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                    cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name,
                                       vcpu,
                                       hostcpu,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug(
                    "hostcpu:%s vcputime: %s emulatortime: "
                    "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                    cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error(
                            "vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error(
                            "Non zero emulatortime even with emulator unpinned"
                        )
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
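
The two pintype policies from the action loop, isolated. The last host cpu is kept out of the pool because it is reserved for the emulator thread (values assumed):

import random

cpus_list = [0, 1, 2, 3, 4, 5, 6, 7]  # assumed online cpu list
max_vcpu = 4
pinnable = cpus_list[:-1]  # last cpu reserved for the emulator thread
for vcpu in range(max_vcpu):
    random_pick = random.choice(pinnable)             # pintype == "random"
    sequential_pick = pinnable[vcpu % len(pinnable)]  # pintype == "sequential"
    print(vcpu, random_pick, sequential_pick)
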
Example #14
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that the virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        dest_path = os.path.join(data_dir.get_data_dir(), "copy")

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define',
                             'undefine', 'crash', 'device-removal-failed',
                             'watchdog', 'io-error']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for the shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, new_disk, target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "blockcommit":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs)
                    snapshot_path = dom.get_blk_devices()['vda']['source']
                    virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % snapshot_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % disk_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda completed")
                    os.unlink(snapshot_path)
                elif event == "blockcopy":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    dom.undefine()
                    virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % disk_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % dest_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda completed")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name, dimm_xml.xml,
                                        flagstr="--config", **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                    expected_events_list.append("'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # The watchdog acts slowly; wait for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                    if action == 'pause':
                        expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail("Domain state should still be paused due to I/O error!")
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
            if os.path.exists(dest_path):
                os.unlink(dest_path)
        return [(dom.name, event) for event in expected_events_list]
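Each entry appended above keeps a literal %s placeholder for the domain name, and several patterns embed regex fragments such as '.*' and escaped parentheses, so the caller is expected to substitute the name and match against the captured 'virsh event' output. A minimal matcher sketch (the helper name match_events is illustrative, not part of the test suite):

import re

def match_events(expected_pairs, event_output):
    """Return the expected patterns missing from the captured
    'virsh event --loop --all' output.

    expected_pairs is the list returned by trigger_events(); each
    pattern still contains a literal %s for the domain name and may
    embed regex fragments such as '.*'.
    """
    missing = []
    for name, pattern in expected_pairs:
        # Substitute the domain name for the first placeholder only;
        # any further %s (e.g. a device path) is matched loosely.
        regex = pattern.replace("%s", re.escape(name), 1).replace("%s", ".*")
        if not re.search(regex, event_output):
            missing.append(regex)
    return missing

Anything left in the returned list points at an event that was expected but never observed.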
Example #15
0
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        events_list = events_list or []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Domain name: %s", dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for the shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, "''", target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
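Both variants above call a wait_for_shutoff() helper that is defined elsewhere in the test module. A plausible minimal sketch, assuming virttest's utils_misc.wait_for utility and the VM object's state() method; the real test defines its own version:

from virttest import utils_misc

def wait_for_shutoff(dom, timeout=60):
    """Block until the domain reaches the 'shut off' state; a sketch of
    the helper the event tests rely on, not the suite's own code."""
    if not utils_misc.wait_for(lambda: dom.state() == "shut off",
                               timeout=timeout):
        raise RuntimeError("%s did not shut off within %ss"
                           % (dom.name, timeout))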
Example #16
0
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) Prepare the guest with the given topology, memory and devices, if any
    2) Start and log in to the guest, check cpu and memory
    3) Do different combinations of vcpupin and, in parallel, run stress
       if given
    4) Do an optional step based on config
    5) Check guest and host are functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu
                # hot(un)plug sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # Check whether the file contents differ
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpu.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name, cpus_list[-1], "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name, "0", cpus_list[0], "--config",
                                   ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))

        if config_pin:
            cpustats = utils_hotplug.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error("emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error("vcputime should be positive as vcpu it is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error("Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error("Non zero cputime even with no vcpu,emualtor pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # Use the last cpu so it is the one taken offline during host_smt
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name, vcpu, hostcpu,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error("vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error("Non zero emulatortime even with emulator unpinned")
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
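The pin-target selection inside the iteration loop above (random versus sequential, with the last host cpu reserved for the emulator thread) can be factored into a small helper; a sketch under those assumptions, with the illustrative name pick_host_cpu:

import random

def pick_host_cpu(cpus_list, vcpu, pintype="random"):
    """Choose a host cpu for pinning the given vcpu; the last entry of
    cpus_list is left for the emulator thread, mirroring the loop above."""
    candidates = cpus_list[:-1]
    if pintype == "sequential":
        return candidates[vcpu % len(candidates)]
    return random.choice(candidates)

The loop body would then reduce to hostcpu = pick_host_cpu(cpus_list, vcpu, pintype).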
Example #17
0
def run(test, params, env):
    """
    Test commands: virsh.vcpupin, virsh.iothreadpin, virsh.emulatorpin.

    Steps:
    - Configure the test VM
    - Check default values are correct
    - Perform virsh vcpupin, check iothreadpin and emulatorpin are not impacted
    - Perform virsh emulatorpin, check vcpupin and iothreadpin are not impacted
    - Perform virsh iothreadpin, check vcpupin and emulatorpin are not impacted
    - Recover test environment
    """

    start_vm = params.get("start_vm", "yes") == "yes"
    change_vcpupin = params.get("change_vcpupin", "no") == 'yes'
    change_emulatorpin = params.get("change_emulatorpin", "no") == 'yes'
    change_iothreadpin = params.get("change_iothreadpin", "no") == 'yes'

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    check_to_skip_case(params, test)

    if vm.is_alive():
        vm.destroy()

    original_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_copy = original_vm_xml.copy()
    prepare_vm(original_vm_xml, params)

    host_cpus = cpu.online_cpus_count()
    cpu_max_index = int(host_cpus) - 1

    try:

        if start_vm:
            vm.start()
        logging.debug("After vm starts, vm xml is:"
                      "%s\n", vm_xml.VMXML.new_from_dumpxml(vm_name))
        logging.debug("Get default vcpupin/emulatorpin/iothreadpin values of the vm")
        vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
        logging.debug("Check and compare default vcpupin/emulatorpin/iothreadpin values")
        compare_results(vcpupin_result, emulatorpin_result,
                        iothreadpin_result, params.get("iothreadid", '1'), test)
        if change_vcpupin:
            # Change vcpupin, then check vcpupin; emulatorpin/iothreadpin
            # should not be affected.
            logging.debug("Now change the vcpupin value for the guest")
            cpu_list = "0-%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            virsh.vcpupin(vm_name, "0", cpu_list, None, debug=True, ignore_status=False)
            changed_vcpupin = {'0': cpu_list}
            check_vcpupin(vcpupin_result, changed_vcpupin, vm_name, test)
            check_emulatorpin(emulatorpin_result, None, vm_name, test)
            check_iothreadpin(iothreadpin_result, None, vm_name, test)

        if change_emulatorpin:
            # Change emulatorpin, then check emulatorpin; vcpupin/iothreadpin
            # should not be affected
            logging.debug("Now change the emulatorpin value for the guest")
            vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
            cpu_list = "0,%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            virsh.emulatorpin(vm_name, cpu_list, ignore_status=False, debug=True)
            changed_emulatorpin = {'*': cpu_list}
            check_emulatorpin(emulatorpin_result, changed_emulatorpin, vm_name, test)
            check_vcpupin(vcpupin_result, None, vm_name, test)
            check_iothreadpin(iothreadpin_result, None, vm_name, test)

        if change_iothreadpin:
            # Change iothreadpin, then check iothreadpin; vcpupin/emulatorpin
            # should not be affected
            logging.debug("Now change the iothreadpin value for the guest")
            vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm_name)
            cpu_list = "%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
            iothread_id = params.get("iothread_id", "1")
            virsh.iothreadpin(vm_name, iothread_id, cpu_list, ignore_status=False, debug=True)
            changed_iothreadpin = {iothread_id: cpu_list}
            check_iothreadpin(iothreadpin_result, changed_iothreadpin, vm_name, test)
            check_vcpupin(vcpupin_result, None, vm_name, test)
            check_emulatorpin(emulatorpin_result, None, vm_name, test)
    finally:
        vm_xml_copy.sync()
        if vm.is_alive():
            vm.destroy()
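The check_vcpupin/check_emulatorpin/check_iothreadpin helpers receive the pre-change result plus a dict of entries that are allowed to differ (or None when nothing should change). A hypothetical comparator showing the shared idea, assuming the results are parsed into id-to-cpulist dicts; it is not the test module's actual implementation:

def assert_pinning(before, after, changed=None, test=None):
    """Fail unless 'after' equals 'before' everywhere except for the
    entries listed in 'changed'.

    before/after map an id ('0', '*', an iothread id) to a cpu list
    string; changed maps ids to their expected new cpu lists.
    """
    changed = changed or {}
    for key, old_value in before.items():
        expected = changed.get(key, old_value)
        if after.get(key) != expected:
            test.fail("Pinning for %s is %s, expected %s"
                      % (key, after.get(key), expected))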
Example #18
0
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        events_list = events_list or []
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        logging.debug("Domain name: %s", dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    raise error.TestError("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
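trigger_events() only returns the expected strings; the events themselves are normally collected by a 'virsh event --loop --all' session running in parallel. A minimal sketch with aexpect (already used above for the metadata edit); the helper name and the uri handling are illustrative:

import aexpect

def start_event_listener(uri=None):
    """Start 'virsh event --loop --all' in the background and return the
    session; read session.get_output() after the events were triggered."""
    prefix = "virsh -c %s" % uri if uri else "virsh"
    return aexpect.ShellSession("%s event --loop --all" % prefix)

After the events fire, session.get_output() holds the raw stream, which can be matched against the returned pairs as in the earlier match_events sketch.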
Example #19
0
    def vm_stress_events(self, event, vm, params):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        :param params: dictionary with the test parameters
        """
        current_vcpu = int(params.get("smp", 2))
        max_vcpu = int(params.get("vcpu_maxcpus", 2))
        iface_num = params.get("iface_num", '1')
        iface_type = params.get("iface_type", "network")
        iface_model = params.get("iface_model", "virtio")
        iface_source = eval(params.get("iface_source",
                                       "{'network':'default'}"))
        attach_option = params.get("attach_option", "")
        detach_option = params.get("detach_option", "")
        disk_size = params.get("virt_disk_device_size", "1")
        disk_type = params.get("disk_type", "file")
        disk_device = params.get("disk_device", "disk")
        disk_format = params.get("disk_format", "qcow2")
        device_target = params.get("virt_disk_device_target", "vda").split()
        path = params.get("path", "")
        device_source_names = params.get("virt_disk_device_source", "").split()
        disk_driver = params.get("driver_name", "qemu")
        self.ignore_status = params.get("ignore_status", "no") == "yes"
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(current_vcpu):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                result = virsh.emulatorpin(vm.name,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, max_vcpu, "--live", **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {
                        'max_config': max_vcpu,
                        'max_live': max_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': max_vcpu,
                        'guest_live': max_vcpu
                    }
                    utils_hotplug.check_vcpu_value(vm,
                                                   exp_vcpu,
                                                   option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {
                        'max_config': max_vcpu,
                        'max_live': max_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': current_vcpu,
                        'guest_live': current_vcpu
                    }
                    utils_hotplug.check_vcpu_value(vm,
                                                   exp_vcpu,
                                                   option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                # Use a distinct loop index so the iface_num parameter
                # is not shadowed
                for iface_idx in range(int(iface_num)):
                    logging.debug("Try to attach interface %d", iface_idx)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    logging.debug(
                        "VM name: %s , Options for Network attach: %s",
                        vm.name, options)
                    ret = virsh.attach_interface(vm.name,
                                                 options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        logging.debug(
                            "VM name: %s , Options for Network detach: %s",
                            vm.name, options)
                        ret = virsh.detach_interface(vm.name,
                                                     options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(path, vm.name,
                                             device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        disk_type,
                        disk_name,
                        disk_size,
                        disk_format=disk_format)
                    disk.update({
                        "format": disk_format,
                        "source": device_source
                    })
                    disk_xml = Disk(disk_type)
                    disk_xml.device = disk_device
                    disk_xml.driver = {
                        "name": disk_driver,
                        "type": disk_format
                    }
                    ret = virsh.attach_disk(vm.name,
                                            disk["source"],
                                            device_target[disk_num],
                                            attach_option,
                                            debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if detach_option:
                        ret = virsh.detach_disk(vm.name,
                                                device_target[disk_num],
                                                extra=detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(disk_type, disk_name)
            else:
                raise NotImplementedError
            time.sleep(self.itr_sleep_time)
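
A hypothetical driver for the method above: fan each event type out to its own thread so several stress events hit the guest concurrently. The stress object is assumed to be an instance of the class defining vm_stress_events(), with iterations, host_cpu_list and the sleep attributes already populated.

import threading

def run_events_in_parallel(stress, events, vm, params):
    """Run each named event loop against the vm in a separate thread."""
    threads = [threading.Thread(target=stress.vm_stress_events,
                                args=(event, vm, params))
               for event in events]
    for thr in threads:
        thr.start()
    # Wait for all event loops to finish before returning
    for thr in threads:
        thr.join()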
Example #20
0
 def trigger_events(events_list=[]):
     """
     Trigger various events in events_list
     """
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ['start', 'restore']:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s does not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:"
                                                 " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:"
                                             " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, '0', '0', **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:"
                                             "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, '0', **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:"
                                             "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, 'a').close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, 'vdb', **virsh_dargs)
                 virsh.detach_disk(vm_name, 'vdb', **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:"
                                             " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Events may not be received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
     # Return outside the finally block so an exception raised while
     # triggering an event is not silently swallowed by the cleanup path.
     return expected_events_list
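
A rough sketch of the listener side that trigger_events() is usually paired with, using the plain virsh CLI rather than a library wrapper; the 30-second timeout is an arbitrary assumption.

import subprocess

def collect_events(vm_name, timeout=30):
    """Run `virsh event` for `timeout` seconds and return the captured text."""
    proc = subprocess.run(
        ["virsh", "event", "--domain", vm_name, "--all",
         "--loop", "--timeout", str(timeout)],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        universal_newlines=True)
    return proc.stdout

Started in a background thread before trigger_events() runs, its output can be matched against the returned expected_events_list.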
Example #21
0
 def vm_stress_events(self, event, vm):
     """
     Stress events
     :param event: event name
     :param vm: vm object
     """
     dargs = {'ignore_status': True, 'debug': True}
     for itr in range(self.iterations):
         if "vcpupin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.vcpupin(vm.name, vcpu,
                                        random.choice(self.host_cpu_list),
                                        **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "emulatorpin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.emulatorpin(
                     vm.name, random.choice(self.host_cpu_list), **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "suspend" in event:
             result = virsh.suspend(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
             time.sleep(self.event_sleep_time)
             result = virsh.resume(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
         elif "cpuhotplug" in event:
             result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {
                     'max_config': self.max_vcpu,
                     'max_live': self.max_vcpu,
                     'cur_config': self.current_vcpu,
                     'cur_live': self.max_vcpu,
                     'guest_live': self.max_vcpu
                 }
                 utils_hotplug.check_vcpu_value(vm,
                                                exp_vcpu,
                                                option="--live")
             time.sleep(self.event_sleep_time)
             result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {
                     'max_config': self.max_vcpu,
                     'max_live': self.max_vcpu,
                     'cur_config': self.current_vcpu,
                     'cur_live': self.current_vcpu,
                     'guest_live': self.current_vcpu
                 }
                 utils_hotplug.check_vcpu_value(vm,
                                                exp_vcpu,
                                                option="--live")
         elif "reboot" in event:
             vm.reboot()
         else:
             raise NotImplementedError
Example #22
0
    def vm_stress_events(self, event, vm):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        """
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.emulatorpin(vm.name,
                                               random.choice(
                                                   self.host_cpu_list),
                                               **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.max_vcpu,
                                'guest_live': self.max_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.current_vcpu,
                                'guest_live': self.current_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for iface_num in range(int(self.iface_num)):
                    logging.debug("Try to attach interface %d" % iface_num)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (self.iface_type, self.iface_source['network'],
                                self.iface_model, mac, self.attach_option))
                    logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                    ret = virsh.attach_interface(vm.name, options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if self.detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (self.iface_type, mac, self.detach_option))
                        logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                        ret = virsh.detach_interface(vm.name, options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(self.device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(self.path, vm.name,
                                             self.device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        self.disk_type, disk_name, self.disk_size,
                        disk_format=self.disk_format)
                    disk.update({"format": self.disk_format,
                                 "source": device_source})
                    disk_xml = Disk(self.disk_type)
                    disk_xml.device = self.disk_device
                    disk_xml.driver = {"name": self.disk_driver,
                                       "type": self.disk_format}
                    ret = virsh.attach_disk(vm.name, disk["source"],
                                            self.device_target[disk_num],
                                            self.attach_option, debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if self.detach_option:
                        ret = virsh.detach_disk(vm.name,
                                                self.device_target[disk_num],
                                                extra=self.detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(self.disk_type, disk_name)
            else:
                raise NotImplementedError
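
After a vcpupin/emulatorpin stress loop like the one above, the resulting affinity can be spot-checked. A hedged sketch, assuming the same avocado-vt virsh wrapper used throughout these examples and that calling emulatorpin without a cpulist queries the current setting (output like "*: 0-3"):

def current_emulator_affinity(vm_name):
    """Return the affinity column of `virsh emulatorpin <vm>` output."""
    result = virsh.emulatorpin(vm_name, ignore_status=False, debug=True)
    for line in result.stdout.strip().splitlines():
        if line.strip().startswith("*:"):
            # e.g. "       *: 0-3" -> "0-3"
            return line.split(":", 1)[1].strip()
    return None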
Example #23
0
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        logging.debug("Triggering events for domain %s", dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s does not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
Example #24
0
 def trigger_events(events_list=[]):
     """
     Trigger various events in events_list
     """
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ["start", "restore"]:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s does not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:" " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, "a").close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                 virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:" " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Events may not be received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
     # Return outside the finally block so an exception raised while
     # triggering an event is not silently swallowed by the cleanup path.
     return expected_events_list
Example #25
0
def test_change_vcpupin_emulatorpin_iothreadpin(test, guest_xml, cpu_max_index, vm, params):
    """
    - Check default values are correct
    - Perform virsh vcpupin, check iothreadpin and emulatorpin are not impacted
    - Perform virsh emulatorpin, check vcpupin and iothreadpin are not impacted
    - Perform virsh iothreadpin, check vcpupin and emulatorpin are not impacted

    :param test: test object
    :param guest_xml: vm xml
    :param cpu_max_index: int, cpu maximum index on host
    :param vm: vm instance
    :param params: test dict
    :return: None
    """
    def _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                      changed_vcpupin, changed_emulatorpin, changed_iothreadpin):
        """
        Internal common function to check the command result

        :param vcpupin_result: dict, the vcpupin command result
        :param emulatorpin_result: dict, the emulatorpin command result
        :param iothreadpin_result: dict, the iothreadpin command result
        :param changed_vcpupin: dict, the changed value for vcpupin
        :param changed_emulatorpin: dict, the changed value for emulatorpin
        :param changed_iothreadpin: dict, the changed value for iothreadpin
        :return: None
        """
        check_vcpupin(vcpupin_result, changed_vcpupin, vm.name, test)
        check_emulatorpin(emulatorpin_result, changed_emulatorpin, vm.name, test)
        check_iothreadpin(iothreadpin_result, changed_iothreadpin, vm.name, test)

    prepare_vm(guest_xml, params)
    vm.start()
    logging.debug("After vm starts, vm xml is:"
                  "%s\n", vm_xml.VMXML.new_from_dumpxml(vm.name))

    logging.debug("Get default vcpupin/emulatorpin/iothreadpin values of the vm")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    logging.debug("Check and compare default vcpupin/emulatorpin/iothreadpin values")
    compare_results(vcpupin_result, emulatorpin_result,
                    iothreadpin_result, params.get("iothread_id"), test)

    # Change vcpupin, then check vcpupin, and emulatorpin/iothreadpin
    # should not be affected.
    logging.debug("Now change vcpupin value to the guest")
    cpu_list = "0-%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    virsh.vcpupin(vm.name, "0", cpu_list, None, debug=True, ignore_status=False)
    changed_vcpupin = {'0': cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  changed_vcpupin, None, None)

    # Change emulatorpin, then check emulatorpin, and vcpupin/iothreadpin
    # should not be affected
    logging.debug("Now change emulatorpin value to the guest")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    cpu_list = "0,%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    virsh.emulatorpin(vm.name, cpu_list, ignore_status=False, debug=True)
    changed_emulatorpin = {'*': cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  None, changed_emulatorpin, None)

    # Change iothreadpin, then check iothreadpin, and vcpupin/emulatorpin
    # should not be affected
    logging.debug("Now change iothreadpin value to the guest")
    vcpupin_result, emulatorpin_result, iothreadpin_result = get_current_values(vm.name)
    cpu_list = "%s" % (cpu_max_index - 1) if cpu_max_index > 1 else "0"
    iothread_id = params.get("iothread_id")
    virsh.iothreadpin(vm.name, iothread_id, cpu_list, ignore_status=False, debug=True)
    changed_iothreadpin = {iothread_id: cpu_list}
    _check_result(vcpupin_result, emulatorpin_result, iothreadpin_result,
                  None, None, changed_iothreadpin)
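
The helpers referenced above (prepare_vm, get_current_values, the check_* functions) are not shown. A plausible minimal sketch of get_current_values, assuming vcpupin/emulatorpin can be called with just the domain name to query, that the wrapper exposes iothreadinfo for the iothread affinity, and that each command prints a header, a dashed separator, then "key: value" rows (the exact layout varies across virsh versions):

def get_current_values(vm_name):
    """Query vcpupin/emulatorpin/iothread affinity and parse them into dicts."""
    def _parse(output):
        pins = {}
        rows = output.strip().splitlines()
        # Skip the header and the dashed separator line
        for line in rows[2:]:
            if ":" in line:
                key, value = line.split(":", 1)
                pins[key.strip()] = value.strip()
        return pins

    vcpupin_result = _parse(virsh.vcpupin(vm_name, debug=True).stdout)
    emulatorpin_result = _parse(virsh.emulatorpin(vm_name, debug=True).stdout)
    iothreadpin_result = _parse(virsh.iothreadinfo(vm_name, debug=True).stdout)
    return vcpupin_result, emulatorpin_result, iothreadpin_result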